/*
 * S2io: Enable all the error and alarm indications
 * [linux-2.6] / drivers / net / s2io.c
 */
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regaring the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explaination of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
#define DRV_VERSION "2.0.26.1"

/* S2io Driver name & version (reported in logs and by ethtool). */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-RxD-mode descriptor size (bytes) and descriptors per block;
 * both are indexed by nic->rxd_mode. */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * The whole expansion and every argument use is parenthesized so the
 * macro is safe inside larger expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
		((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
		 (((subid) >= 0x640B) && ((subid) <= 0x640D)) ? 1 : 0) : 0)

/* Link is up when neither remote nor local RMAC fault is flagged in
 * the adapter status register value. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically marks the tasklet busy; expects a local
 * 'struct s2io_nic *sp' in scope at the expansion site. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx buffer replenish urgency levels returned by rx_buffer_level(). */
#define PANIC	1
#define LOW	2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
/* Ethtool related variables and Macros. */
/* Self-test names reported to ethtool; the "(offline)"/"(online)"
 * suffix indicates when each test may be run. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};
141
/* Names of the hardware (Xena) statistics exposed via ethtool.
 * NOTE(review): entries appear to be reported positionally — confirm
 * against the stats-fetch code before reordering. */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
        /* Tx MAC counters */
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_ttl_octets"},
        {"tmac_ucst_frms"},
        {"tmac_nucst_frms"},
        {"tmac_any_err_frms"},
        {"tmac_ttl_less_fb_octets"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        /* Rx MAC counters */
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_out_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_unsup_ctrl_frms"},
        {"rmac_ttl_octets"},
        {"rmac_accepted_ucst_frms"},
        {"rmac_accepted_nucst_frms"},
        {"rmac_discarded_frms"},
        {"rmac_drop_events"},
        {"rmac_ttl_less_fb_octets"},
        {"rmac_ttl_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ttl_64_frms"},
        {"rmac_ttl_65_127_frms"},
        {"rmac_ttl_128_255_frms"},
        {"rmac_ttl_256_511_frms"},
        {"rmac_ttl_512_1023_frms"},
        {"rmac_ttl_1024_1518_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_xgmii_err_sym"},
        {"rmac_frms_q0"},
        {"rmac_frms_q1"},
        {"rmac_frms_q2"},
        {"rmac_frms_q3"},
        {"rmac_frms_q4"},
        {"rmac_frms_q5"},
        {"rmac_frms_q6"},
        {"rmac_frms_q7"},
        {"rmac_full_q0"},
        {"rmac_full_q1"},
        {"rmac_full_q2"},
        {"rmac_full_q3"},
        {"rmac_full_q4"},
        {"rmac_full_q5"},
        {"rmac_full_q6"},
        {"rmac_full_q7"},
        {"rmac_pause_cnt"},
        {"rmac_xgmii_data_err_cnt"},
        {"rmac_xgmii_ctrl_err_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        /* Bus read/write transaction counters */
        {"rd_req_cnt"},
        {"new_rd_req_cnt"},
        {"new_rd_req_rtry_cnt"},
        {"rd_rtry_cnt"},
        {"wr_rtry_rd_ack_cnt"},
        {"wr_req_cnt"},
        {"new_wr_req_cnt"},
        {"new_wr_req_rtry_cnt"},
        {"wr_rtry_cnt"},
        {"wr_disc_cnt"},
        {"rd_rtry_wr_ack_cnt"},
        {"txp_wr_cnt"},
        {"txd_rd_cnt"},
        {"txd_wr_cnt"},
        {"rxd_rd_cnt"},
        {"rxd_wr_cnt"},
        {"txf_rd_cnt"},
        {"rxf_wr_cnt"}
};
238
/* Additional ("enhanced") statistics keys; appended to the Xena set
 * for Xframe-II adapters (see XFRAME_II_STAT_LEN). */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
        {"rmac_ttl_1519_4095_frms"},
        {"rmac_ttl_4096_8191_frms"},
        {"rmac_ttl_8192_max_frms"},
        {"rmac_ttl_gt_max_frms"},
        {"rmac_osized_alt_frms"},
        {"rmac_jabber_alt_frms"},
        {"rmac_gt_max_alt_frms"},
        {"rmac_vlan_frms"},
        {"rmac_len_discard"},
        {"rmac_fcs_discard"},
        {"rmac_pf_discard"},
        {"rmac_da_discard"},
        {"rmac_red_discard"},
        {"rmac_rts_discard"},
        {"rmac_ingm_full_discard"},
        {"link_fault_cnt"}
};
257
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259         {"\n DRIVER STATISTICS"},
260         {"single_bit_ecc_errs"},
261         {"double_bit_ecc_errs"},
262         {"parity_err_cnt"},
263         {"serious_err_cnt"},
264         {"soft_reset_cnt"},
265         {"fifo_full_cnt"},
266         {"ring_full_cnt"},
267         ("alarm_transceiver_temp_high"),
268         ("alarm_transceiver_temp_low"),
269         ("alarm_laser_bias_current_high"),
270         ("alarm_laser_bias_current_low"),
271         ("alarm_laser_output_power_high"),
272         ("alarm_laser_output_power_low"),
273         ("warn_transceiver_temp_high"),
274         ("warn_transceiver_temp_low"),
275         ("warn_laser_bias_current_high"),
276         ("warn_laser_bias_current_low"),
277         ("warn_laser_output_power_high"),
278         ("warn_laser_output_power_low"),
279         ("lro_aggregated_pkts"),
280         ("lro_flush_both_count"),
281         ("lro_out_of_sequence_pkts"),
282         ("lro_flush_due_to_max_pkts"),
283         ("lro_avg_aggr_pkts"),
284         ("mem_alloc_fail_cnt"),
285         ("pci_map_fail_cnt"),
286         ("watchdog_timer_cnt"),
287         ("mem_allocated"),
288         ("mem_freed"),
289         ("link_up_cnt"),
290         ("link_down_cnt"),
291         ("link_up_time"),
292         ("link_down_time"),
293         ("tx_tcode_buf_abort_cnt"),
294         ("tx_tcode_desc_abort_cnt"),
295         ("tx_tcode_parity_err_cnt"),
296         ("tx_tcode_link_loss_cnt"),
297         ("tx_tcode_list_proc_err_cnt"),
298         ("rx_tcode_parity_err_cnt"),
299         ("rx_tcode_abort_cnt"),
300         ("rx_tcode_parity_abort_cnt"),
301         ("rx_tcode_rda_fail_cnt"),
302         ("rx_tcode_unkn_prot_cnt"),
303         ("rx_tcode_fcs_err_cnt"),
304         ("rx_tcode_buf_size_err_cnt"),
305         ("rx_tcode_rxd_corrupt_cnt"),
306         ("rx_tcode_unkn_err_cnt")
307 };
308
/* Entry counts of the ethtool key tables above.  Each expansion is
 * wrapped in parentheses so the macros combine safely in larger
 * expressions (the originals expanded to bare a / b and a * b). */
#define S2IO_XENA_STAT_LEN (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
					ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys) / \
					ETH_GSTRING_LEN)

/* Xframe-I reports hardware + driver stats; Xframe-II adds the
 * enhanced set on top. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

/* Byte lengths of the corresponding string tables. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
322
/* Arm a timer: initialize it, attach handler and argument, and
 * schedule it to fire at jiffies + exp.  Wrapped in do { } while (0)
 * so the multi-statement expansion behaves as a single statement
 * (safe inside an unbraced if/else); call sites still terminate the
 * macro with a semicolon, exactly as before. */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&(timer));				\
		(timer).function = (handle);			\
		(timer).data = (unsigned long)(arg);		\
		mod_timer(&(timer), (jiffies + (exp)));		\
	} while (0)

329 /* Add the vlan */
330 static void s2io_vlan_rx_register(struct net_device *dev,
331                                         struct vlan_group *grp)
332 {
333         struct s2io_nic *nic = dev->priv;
334         unsigned long flags;
335
336         spin_lock_irqsave(&nic->tx_lock, flags);
337         nic->vlgrp = grp;
338         spin_unlock_irqrestore(&nic->tx_lock, flags);
339 }
340
341 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
342 static int vlan_strip_flag;
343
344 /*
345  * Constants to be programmed into the Xena's registers, to configure
346  * the XAUI.
347  */
348
/* Terminator value for the register-write tables below. */
#define END_SIGN        0x0
/* XAUI/DTX configuration sequence for Xframe-II (Herc): pairs of
 * "set address" / "write data" values, terminated by END_SIGN. */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};
370
/* XAUI/DTX configuration sequence for Xframe-I (Xena): same
 * "set address" / "write data" pairing, END_SIGN-terminated. */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};
386
387 /*
388  * Constants for Fixing the MacAddress problem seen mostly on
389  * Alpha machines.
390  */
/* END_SIGN-terminated register value sequence; see the MacAddress
 * workaround comment above for why this table exists. */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
408
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
/* Number of Tx FIFOs and Rx rings used (see the parameter summary in
 * the file header comment). */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


/* Rx ring operation mode: valid values 1 or 2 (per header comment). */
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

/* NAPI (1 = enable), UFO (0 = disable) and vlan stripping mode
 * (default NO_STRIP_IN_PROMISC); see header comment for semantics. */
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts, per-ring Rx block counts, and
 * per-ring rts_frm_len values; all overridable as array parameters. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
454
455 /*
456  * S2IO device table.
457  * This table lists all the devices that this driver supports.
458  */
static struct pci_device_id s2io_tbl[] __devinitdata = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {0,}    /* terminating all-zero entry */
};
470
MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error-recovery callbacks wired into the driver below. */
static struct pci_error_handlers s2io_err_handler = {
        .error_detected = s2io_io_error_detected,
        .slot_reset = s2io_io_slot_reset,
        .resume = s2io_io_resume,
};

/* PCI driver registration: probe/remove entry points plus the error
 * handler; matches the device IDs listed in s2io_tbl. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
486
/* A simplifier macro used both by init and free shared_mem Fns():
 * number of pages needed to hold `len` items at `per_each` items per
 * page (ceiling division).  Arguments are parenthesized so calls like
 * TXD_MEM_PAGE_CNT(n, a + b) expand correctly — the original expanded
 * the raw tokens and misparsed compound arguments. */
#define TXD_MEM_PAGE_CNT(len, per_each) \
	(((len) + (per_each) - 1) / (per_each))
489
490 /**
491  * init_shared_mem - Allocation and Initialization of Memory
492  * @nic: Device private variable.
493  * Description: The function allocates all the memory areas shared
494  * between the NIC and the driver. This includes Tx descriptors,
495  * Rx descriptors and the statistics block.
496  */
497
498 static int init_shared_mem(struct s2io_nic *nic)
499 {
500         u32 size;
501         void *tmp_v_addr, *tmp_v_addr_next;
502         dma_addr_t tmp_p_addr, tmp_p_addr_next;
503         struct RxD_block *pre_rxd_blk = NULL;
504         int i, j, blk_cnt;
505         int lst_size, lst_per_page;
506         struct net_device *dev = nic->dev;
507         unsigned long tmp;
508         struct buffAdd *ba;
509
510         struct mac_info *mac_control;
511         struct config_param *config;
512         unsigned long long mem_allocated = 0;
513
514         mac_control = &nic->mac_control;
515         config = &nic->config;
516
517
518         /* Allocation and initialization of TXDLs in FIOFs */
519         size = 0;
520         for (i = 0; i < config->tx_fifo_num; i++) {
521                 size += config->tx_cfg[i].fifo_len;
522         }
523         if (size > MAX_AVAILABLE_TXDS) {
524                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
525                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
526                 return -EINVAL;
527         }
528
529         lst_size = (sizeof(struct TxD) * config->max_txds);
530         lst_per_page = PAGE_SIZE / lst_size;
531
532         for (i = 0; i < config->tx_fifo_num; i++) {
533                 int fifo_len = config->tx_cfg[i].fifo_len;
534                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
535                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
536                                                           GFP_KERNEL);
537                 if (!mac_control->fifos[i].list_info) {
538                         DBG_PRINT(INFO_DBG,
539                                   "Malloc failed for list_info\n");
540                         return -ENOMEM;
541                 }
542                 mem_allocated += list_holder_size;
543                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
544         }
545         for (i = 0; i < config->tx_fifo_num; i++) {
546                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
547                                                 lst_per_page);
548                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
549                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
550                     config->tx_cfg[i].fifo_len - 1;
551                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
552                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
553                     config->tx_cfg[i].fifo_len - 1;
554                 mac_control->fifos[i].fifo_no = i;
555                 mac_control->fifos[i].nic = nic;
556                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
557
558                 for (j = 0; j < page_num; j++) {
559                         int k = 0;
560                         dma_addr_t tmp_p;
561                         void *tmp_v;
562                         tmp_v = pci_alloc_consistent(nic->pdev,
563                                                      PAGE_SIZE, &tmp_p);
564                         if (!tmp_v) {
565                                 DBG_PRINT(INFO_DBG,
566                                           "pci_alloc_consistent ");
567                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
568                                 return -ENOMEM;
569                         }
570                         /* If we got a zero DMA address(can happen on
571                          * certain platforms like PPC), reallocate.
572                          * Store virtual address of page we don't want,
573                          * to be freed later.
574                          */
575                         if (!tmp_p) {
576                                 mac_control->zerodma_virt_addr = tmp_v;
577                                 DBG_PRINT(INIT_DBG,
578                                 "%s: Zero DMA address for TxDL. ", dev->name);
579                                 DBG_PRINT(INIT_DBG,
580                                 "Virtual address %p\n", tmp_v);
581                                 tmp_v = pci_alloc_consistent(nic->pdev,
582                                                      PAGE_SIZE, &tmp_p);
583                                 if (!tmp_v) {
584                                         DBG_PRINT(INFO_DBG,
585                                           "pci_alloc_consistent ");
586                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
587                                         return -ENOMEM;
588                                 }
589                                 mem_allocated += PAGE_SIZE;
590                         }
591                         while (k < lst_per_page) {
592                                 int l = (j * lst_per_page) + k;
593                                 if (l == config->tx_cfg[i].fifo_len)
594                                         break;
595                                 mac_control->fifos[i].list_info[l].list_virt_addr =
596                                     tmp_v + (k * lst_size);
597                                 mac_control->fifos[i].list_info[l].list_phy_addr =
598                                     tmp_p + (k * lst_size);
599                                 k++;
600                         }
601                 }
602         }
603
604         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
605         if (!nic->ufo_in_band_v)
606                 return -ENOMEM;
607          mem_allocated += (size * sizeof(u64));
608
609         /* Allocation and initialization of RXDs in Rings */
610         size = 0;
611         for (i = 0; i < config->rx_ring_num; i++) {
612                 if (config->rx_cfg[i].num_rxd %
613                     (rxd_count[nic->rxd_mode] + 1)) {
614                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
615                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
616                                   i);
617                         DBG_PRINT(ERR_DBG, "RxDs per Block");
618                         return FAILURE;
619                 }
620                 size += config->rx_cfg[i].num_rxd;
621                 mac_control->rings[i].block_count =
622                         config->rx_cfg[i].num_rxd /
623                         (rxd_count[nic->rxd_mode] + 1 );
624                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
625                         mac_control->rings[i].block_count;
626         }
627         if (nic->rxd_mode == RXD_MODE_1)
628                 size = (size * (sizeof(struct RxD1)));
629         else
630                 size = (size * (sizeof(struct RxD3)));
631
632         for (i = 0; i < config->rx_ring_num; i++) {
633                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
634                 mac_control->rings[i].rx_curr_get_info.offset = 0;
635                 mac_control->rings[i].rx_curr_get_info.ring_len =
636                     config->rx_cfg[i].num_rxd - 1;
637                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
638                 mac_control->rings[i].rx_curr_put_info.offset = 0;
639                 mac_control->rings[i].rx_curr_put_info.ring_len =
640                     config->rx_cfg[i].num_rxd - 1;
641                 mac_control->rings[i].nic = nic;
642                 mac_control->rings[i].ring_no = i;
643
644                 blk_cnt = config->rx_cfg[i].num_rxd /
645                                 (rxd_count[nic->rxd_mode] + 1);
646                 /*  Allocating all the Rx blocks */
647                 for (j = 0; j < blk_cnt; j++) {
648                         struct rx_block_info *rx_blocks;
649                         int l;
650
651                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
652                         size = SIZE_OF_BLOCK; //size is always page size
653                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
654                                                           &tmp_p_addr);
655                         if (tmp_v_addr == NULL) {
656                                 /*
657                                  * In case of failure, free_shared_mem()
658                                  * is called, which should free any
659                                  * memory that was alloced till the
660                                  * failure happened.
661                                  */
662                                 rx_blocks->block_virt_addr = tmp_v_addr;
663                                 return -ENOMEM;
664                         }
665                         mem_allocated += size;
666                         memset(tmp_v_addr, 0, size);
667                         rx_blocks->block_virt_addr = tmp_v_addr;
668                         rx_blocks->block_dma_addr = tmp_p_addr;
669                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
670                                                   rxd_count[nic->rxd_mode],
671                                                   GFP_KERNEL);
672                         if (!rx_blocks->rxds)
673                                 return -ENOMEM;
674                         mem_allocated += 
675                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
676                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
677                                 rx_blocks->rxds[l].virt_addr =
678                                         rx_blocks->block_virt_addr +
679                                         (rxd_size[nic->rxd_mode] * l);
680                                 rx_blocks->rxds[l].dma_addr =
681                                         rx_blocks->block_dma_addr +
682                                         (rxd_size[nic->rxd_mode] * l);
683                         }
684                 }
685                 /* Interlinking all Rx Blocks */
686                 for (j = 0; j < blk_cnt; j++) {
687                         tmp_v_addr =
688                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
689                         tmp_v_addr_next =
690                                 mac_control->rings[i].rx_blocks[(j + 1) %
691                                               blk_cnt].block_virt_addr;
692                         tmp_p_addr =
693                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
694                         tmp_p_addr_next =
695                                 mac_control->rings[i].rx_blocks[(j + 1) %
696                                               blk_cnt].block_dma_addr;
697
698                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
699                         pre_rxd_blk->reserved_2_pNext_RxD_block =
700                             (unsigned long) tmp_v_addr_next;
701                         pre_rxd_blk->pNext_RxD_Blk_physical =
702                             (u64) tmp_p_addr_next;
703                 }
704         }
705         if (nic->rxd_mode == RXD_MODE_3B) {
706                 /*
707                  * Allocation of Storages for buffer addresses in 2BUFF mode
708                  * and the buffers as well.
709                  */
710                 for (i = 0; i < config->rx_ring_num; i++) {
711                         blk_cnt = config->rx_cfg[i].num_rxd /
712                            (rxd_count[nic->rxd_mode]+ 1);
713                         mac_control->rings[i].ba =
714                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
715                                      GFP_KERNEL);
716                         if (!mac_control->rings[i].ba)
717                                 return -ENOMEM;
718                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
719                         for (j = 0; j < blk_cnt; j++) {
720                                 int k = 0;
721                                 mac_control->rings[i].ba[j] =
722                                         kmalloc((sizeof(struct buffAdd) *
723                                                 (rxd_count[nic->rxd_mode] + 1)),
724                                                 GFP_KERNEL);
725                                 if (!mac_control->rings[i].ba[j])
726                                         return -ENOMEM;
727                                 mem_allocated += (sizeof(struct buffAdd) *  \
728                                         (rxd_count[nic->rxd_mode] + 1));
729                                 while (k != rxd_count[nic->rxd_mode]) {
730                                         ba = &mac_control->rings[i].ba[j][k];
731
732                                         ba->ba_0_org = (void *) kmalloc
733                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
734                                         if (!ba->ba_0_org)
735                                                 return -ENOMEM;
736                                         mem_allocated += 
737                                                 (BUF0_LEN + ALIGN_SIZE);
738                                         tmp = (unsigned long)ba->ba_0_org;
739                                         tmp += ALIGN_SIZE;
740                                         tmp &= ~((unsigned long) ALIGN_SIZE);
741                                         ba->ba_0 = (void *) tmp;
742
743                                         ba->ba_1_org = (void *) kmalloc
744                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
745                                         if (!ba->ba_1_org)
746                                                 return -ENOMEM;
747                                         mem_allocated 
748                                                 += (BUF1_LEN + ALIGN_SIZE);
749                                         tmp = (unsigned long) ba->ba_1_org;
750                                         tmp += ALIGN_SIZE;
751                                         tmp &= ~((unsigned long) ALIGN_SIZE);
752                                         ba->ba_1 = (void *) tmp;
753                                         k++;
754                                 }
755                         }
756                 }
757         }
758
759         /* Allocation and initialization of Statistics block */
760         size = sizeof(struct stat_block);
761         mac_control->stats_mem = pci_alloc_consistent
762             (nic->pdev, size, &mac_control->stats_mem_phy);
763
764         if (!mac_control->stats_mem) {
765                 /*
766                  * In case of failure, free_shared_mem() is called, which
767                  * should free any memory that was alloced till the
768                  * failure happened.
769                  */
770                 return -ENOMEM;
771         }
772         mem_allocated += size;
773         mac_control->stats_mem_sz = size;
774
775         tmp_v_addr = mac_control->stats_mem;
776         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
777         memset(tmp_v_addr, 0, size);
778         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
779                   (unsigned long long) tmp_p_addr);
780         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
781         return SUCCESS;
782 }
783
784 /**
785  * free_shared_mem - Free the allocated Memory
786  * @nic:  Device private variable.
787  * Description: This function is to free all memory locations allocated by
788  * the init_shared_mem() function and return it to the kernel.
789  */
790
791 static void free_shared_mem(struct s2io_nic *nic)
792 {
793         int i, j, blk_cnt, size;
794         u32 ufo_size = 0;
795         void *tmp_v_addr;
796         dma_addr_t tmp_p_addr;
797         struct mac_info *mac_control;
798         struct config_param *config;
799         int lst_size, lst_per_page;
800         struct net_device *dev;
801         int page_num = 0;
802
803         if (!nic)
804                 return;
805
806         dev = nic->dev;
807
808         mac_control = &nic->mac_control;
809         config = &nic->config;
810
811         lst_size = (sizeof(struct TxD) * config->max_txds);
812         lst_per_page = PAGE_SIZE / lst_size;
813
814         for (i = 0; i < config->tx_fifo_num; i++) {
815                 ufo_size += config->tx_cfg[i].fifo_len;
816                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
817                                                         lst_per_page);
818                 for (j = 0; j < page_num; j++) {
819                         int mem_blks = (j * lst_per_page);
820                         if (!mac_control->fifos[i].list_info)
821                                 return;
822                         if (!mac_control->fifos[i].list_info[mem_blks].
823                                  list_virt_addr)
824                                 break;
825                         pci_free_consistent(nic->pdev, PAGE_SIZE,
826                                             mac_control->fifos[i].
827                                             list_info[mem_blks].
828                                             list_virt_addr,
829                                             mac_control->fifos[i].
830                                             list_info[mem_blks].
831                                             list_phy_addr);
832                         nic->mac_control.stats_info->sw_stat.mem_freed 
833                                                 += PAGE_SIZE;
834                 }
835                 /* If we got a zero DMA address during allocation,
836                  * free the page now
837                  */
838                 if (mac_control->zerodma_virt_addr) {
839                         pci_free_consistent(nic->pdev, PAGE_SIZE,
840                                             mac_control->zerodma_virt_addr,
841                                             (dma_addr_t)0);
842                         DBG_PRINT(INIT_DBG,
843                                 "%s: Freeing TxDL with zero DMA addr. ",
844                                 dev->name);
845                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
846                                 mac_control->zerodma_virt_addr);
847                         nic->mac_control.stats_info->sw_stat.mem_freed 
848                                                 += PAGE_SIZE;
849                 }
850                 kfree(mac_control->fifos[i].list_info);
851                 nic->mac_control.stats_info->sw_stat.mem_freed += 
852                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
853         }
854
855         size = SIZE_OF_BLOCK;
856         for (i = 0; i < config->rx_ring_num; i++) {
857                 blk_cnt = mac_control->rings[i].block_count;
858                 for (j = 0; j < blk_cnt; j++) {
859                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
860                                 block_virt_addr;
861                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
862                                 block_dma_addr;
863                         if (tmp_v_addr == NULL)
864                                 break;
865                         pci_free_consistent(nic->pdev, size,
866                                             tmp_v_addr, tmp_p_addr);
867                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
868                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
869                         nic->mac_control.stats_info->sw_stat.mem_freed += 
870                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
871                 }
872         }
873
874         if (nic->rxd_mode == RXD_MODE_3B) {
875                 /* Freeing buffer storage addresses in 2BUFF mode. */
876                 for (i = 0; i < config->rx_ring_num; i++) {
877                         blk_cnt = config->rx_cfg[i].num_rxd /
878                             (rxd_count[nic->rxd_mode] + 1);
879                         for (j = 0; j < blk_cnt; j++) {
880                                 int k = 0;
881                                 if (!mac_control->rings[i].ba[j])
882                                         continue;
883                                 while (k != rxd_count[nic->rxd_mode]) {
884                                         struct buffAdd *ba =
885                                                 &mac_control->rings[i].ba[j][k];
886                                         kfree(ba->ba_0_org);
887                                         nic->mac_control.stats_info->sw_stat.\
888                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
889                                         kfree(ba->ba_1_org);
890                                         nic->mac_control.stats_info->sw_stat.\
891                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
892                                         k++;
893                                 }
894                                 kfree(mac_control->rings[i].ba[j]);
895                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
896                                         (sizeof(struct buffAdd) *
897                                         (rxd_count[nic->rxd_mode] + 1));
898                         }
899                         kfree(mac_control->rings[i].ba);
900                         nic->mac_control.stats_info->sw_stat.mem_freed += 
901                         (sizeof(struct buffAdd *) * blk_cnt);
902                 }
903         }
904
905         if (mac_control->stats_mem) {
906                 pci_free_consistent(nic->pdev,
907                                     mac_control->stats_mem_sz,
908                                     mac_control->stats_mem,
909                                     mac_control->stats_mem_phy);
910                 nic->mac_control.stats_info->sw_stat.mem_freed += 
911                         mac_control->stats_mem_sz;
912         }
913         if (nic->ufo_in_band_v) {
914                 kfree(nic->ufo_in_band_v);
915                 nic->mac_control.stats_info->sw_stat.mem_freed 
916                         += (ufo_size * sizeof(u64));
917         }
918 }
919
920 /**
921  * s2io_verify_pci_mode -
922  */
923
924 static int s2io_verify_pci_mode(struct s2io_nic *nic)
925 {
926         struct XENA_dev_config __iomem *bar0 = nic->bar0;
927         register u64 val64 = 0;
928         int     mode;
929
930         val64 = readq(&bar0->pci_mode);
931         mode = (u8)GET_PCI_MODE(val64);
932
933         if ( val64 & PCI_MODE_UNKNOWN_MODE)
934                 return -1;      /* Unknown PCI mode */
935         return mode;
936 }
937
938 #define NEC_VENID   0x1033
939 #define NEC_DEVID   0x0125
940 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
941 {
942         struct pci_dev *tdev = NULL;
943         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
944                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
945                         if (tdev->bus == s2io_pdev->bus->parent)
946                                 pci_dev_put(tdev);
947                                 return 1;
948                 }
949         }
950         return 0;
951 }
952
953 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
954 /**
955  * s2io_print_pci_mode -
956  */
957 static int s2io_print_pci_mode(struct s2io_nic *nic)
958 {
959         struct XENA_dev_config __iomem *bar0 = nic->bar0;
960         register u64 val64 = 0;
961         int     mode;
962         struct config_param *config = &nic->config;
963
964         val64 = readq(&bar0->pci_mode);
965         mode = (u8)GET_PCI_MODE(val64);
966
967         if ( val64 & PCI_MODE_UNKNOWN_MODE)
968                 return -1;      /* Unknown PCI mode */
969
970         config->bus_speed = bus_speed[mode];
971
972         if (s2io_on_nec_bridge(nic->pdev)) {
973                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
974                                                         nic->dev->name);
975                 return mode;
976         }
977
978         if (val64 & PCI_MODE_32_BITS) {
979                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
980         } else {
981                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
982         }
983
984         switch(mode) {
985                 case PCI_MODE_PCI_33:
986                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
987                         break;
988                 case PCI_MODE_PCI_66:
989                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
990                         break;
991                 case PCI_MODE_PCIX_M1_66:
992                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
993                         break;
994                 case PCI_MODE_PCIX_M1_100:
995                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
996                         break;
997                 case PCI_MODE_PCIX_M1_133:
998                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
999                         break;
1000                 case PCI_MODE_PCIX_M2_66:
1001                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1002                         break;
1003                 case PCI_MODE_PCIX_M2_100:
1004                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1005                         break;
1006                 case PCI_MODE_PCIX_M2_133:
1007                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1008                         break;
1009                 default:
1010                         return -1;      /* Unsupported bus speed */
1011         }
1012
1013         return mode;
1014 }
1015
1016 /**
1017  *  init_nic - Initialization of hardware
 *  @nic: device private variable
1019  *  Description: The function sequentially configures every block
1020  *  of the H/W from their reset values.
1021  *  Return Value:  SUCCESS on success and
1022  *  '-1' on failure (endian settings incorrect).
1023  */
1024
1025 static int init_nic(struct s2io_nic *nic)
1026 {
1027         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1028         struct net_device *dev = nic->dev;
1029         register u64 val64 = 0;
1030         void __iomem *add;
1031         u32 time;
1032         int i, j;
1033         struct mac_info *mac_control;
1034         struct config_param *config;
1035         int dtx_cnt = 0;
1036         unsigned long long mem_share;
1037         int mem_size;
1038
1039         mac_control = &nic->mac_control;
1040         config = &nic->config;
1041
1042         /* to set the swapper controle on the card */
1043         if(s2io_set_swapper(nic)) {
1044                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1045                 return -1;
1046         }
1047
1048         /*
1049          * Herc requires EOI to be removed from reset before XGXS, so..
1050          */
1051         if (nic->device_type & XFRAME_II_DEVICE) {
1052                 val64 = 0xA500000000ULL;
1053                 writeq(val64, &bar0->sw_reset);
1054                 msleep(500);
1055                 val64 = readq(&bar0->sw_reset);
1056         }
1057
1058         /* Remove XGXS from reset state */
1059         val64 = 0;
1060         writeq(val64, &bar0->sw_reset);
1061         msleep(500);
1062         val64 = readq(&bar0->sw_reset);
1063
1064         /*  Enable Receiving broadcasts */
1065         add = &bar0->mac_cfg;
1066         val64 = readq(&bar0->mac_cfg);
1067         val64 |= MAC_RMAC_BCAST_ENABLE;
1068         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1069         writel((u32) val64, add);
1070         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1071         writel((u32) (val64 >> 32), (add + 4));
1072
1073         /* Read registers in all blocks */
1074         val64 = readq(&bar0->mac_int_mask);
1075         val64 = readq(&bar0->mc_int_mask);
1076         val64 = readq(&bar0->xgxs_int_mask);
1077
1078         /*  Set MTU */
1079         val64 = dev->mtu;
1080         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1081
1082         if (nic->device_type & XFRAME_II_DEVICE) {
1083                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1084                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1085                                           &bar0->dtx_control, UF);
1086                         if (dtx_cnt & 0x1)
1087                                 msleep(1); /* Necessary!! */
1088                         dtx_cnt++;
1089                 }
1090         } else {
1091                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1092                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1093                                           &bar0->dtx_control, UF);
1094                         val64 = readq(&bar0->dtx_control);
1095                         dtx_cnt++;
1096                 }
1097         }
1098
1099         /*  Tx DMA Initialization */
1100         val64 = 0;
1101         writeq(val64, &bar0->tx_fifo_partition_0);
1102         writeq(val64, &bar0->tx_fifo_partition_1);
1103         writeq(val64, &bar0->tx_fifo_partition_2);
1104         writeq(val64, &bar0->tx_fifo_partition_3);
1105
1106
1107         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1108                 val64 |=
1109                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1110                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1111                                     ((i * 32) + 5), 3);
1112
1113                 if (i == (config->tx_fifo_num - 1)) {
1114                         if (i % 2 == 0)
1115                                 i++;
1116                 }
1117
1118                 switch (i) {
1119                 case 1:
1120                         writeq(val64, &bar0->tx_fifo_partition_0);
1121                         val64 = 0;
1122                         break;
1123                 case 3:
1124                         writeq(val64, &bar0->tx_fifo_partition_1);
1125                         val64 = 0;
1126                         break;
1127                 case 5:
1128                         writeq(val64, &bar0->tx_fifo_partition_2);
1129                         val64 = 0;
1130                         break;
1131                 case 7:
1132                         writeq(val64, &bar0->tx_fifo_partition_3);
1133                         break;
1134                 }
1135         }
1136
1137         /*
1138          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1139          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1140          */
1141         if ((nic->device_type == XFRAME_I_DEVICE) &&
1142                 (nic->pdev->revision < 4))
1143                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1144
1145         val64 = readq(&bar0->tx_fifo_partition_0);
1146         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1147                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1148
1149         /*
1150          * Initialization of Tx_PA_CONFIG register to ignore packet
1151          * integrity checking.
1152          */
1153         val64 = readq(&bar0->tx_pa_cfg);
1154         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1155             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1156         writeq(val64, &bar0->tx_pa_cfg);
1157
1158         /* Rx DMA intialization. */
1159         val64 = 0;
1160         for (i = 0; i < config->rx_ring_num; i++) {
1161                 val64 |=
1162                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1163                          3);
1164         }
1165         writeq(val64, &bar0->rx_queue_priority);
1166
1167         /*
1168          * Allocating equal share of memory to all the
1169          * configured Rings.
1170          */
1171         val64 = 0;
1172         if (nic->device_type & XFRAME_II_DEVICE)
1173                 mem_size = 32;
1174         else
1175                 mem_size = 64;
1176
1177         for (i = 0; i < config->rx_ring_num; i++) {
1178                 switch (i) {
1179                 case 0:
1180                         mem_share = (mem_size / config->rx_ring_num +
1181                                      mem_size % config->rx_ring_num);
1182                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1183                         continue;
1184                 case 1:
1185                         mem_share = (mem_size / config->rx_ring_num);
1186                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1187                         continue;
1188                 case 2:
1189                         mem_share = (mem_size / config->rx_ring_num);
1190                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1191                         continue;
1192                 case 3:
1193                         mem_share = (mem_size / config->rx_ring_num);
1194                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1195                         continue;
1196                 case 4:
1197                         mem_share = (mem_size / config->rx_ring_num);
1198                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1199                         continue;
1200                 case 5:
1201                         mem_share = (mem_size / config->rx_ring_num);
1202                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1203                         continue;
1204                 case 6:
1205                         mem_share = (mem_size / config->rx_ring_num);
1206                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1207                         continue;
1208                 case 7:
1209                         mem_share = (mem_size / config->rx_ring_num);
1210                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1211                         continue;
1212                 }
1213         }
1214         writeq(val64, &bar0->rx_queue_cfg);
1215
1216         /*
1217          * Filling Tx round robin registers
1218          * as per the number of FIFOs
1219          */
1220         switch (config->tx_fifo_num) {
1221         case 1:
1222                 val64 = 0x0000000000000000ULL;
1223                 writeq(val64, &bar0->tx_w_round_robin_0);
1224                 writeq(val64, &bar0->tx_w_round_robin_1);
1225                 writeq(val64, &bar0->tx_w_round_robin_2);
1226                 writeq(val64, &bar0->tx_w_round_robin_3);
1227                 writeq(val64, &bar0->tx_w_round_robin_4);
1228                 break;
1229         case 2:
1230                 val64 = 0x0000010000010000ULL;
1231                 writeq(val64, &bar0->tx_w_round_robin_0);
1232                 val64 = 0x0100000100000100ULL;
1233                 writeq(val64, &bar0->tx_w_round_robin_1);
1234                 val64 = 0x0001000001000001ULL;
1235                 writeq(val64, &bar0->tx_w_round_robin_2);
1236                 val64 = 0x0000010000010000ULL;
1237                 writeq(val64, &bar0->tx_w_round_robin_3);
1238                 val64 = 0x0100000000000000ULL;
1239                 writeq(val64, &bar0->tx_w_round_robin_4);
1240                 break;
1241         case 3:
1242                 val64 = 0x0001000102000001ULL;
1243                 writeq(val64, &bar0->tx_w_round_robin_0);
1244                 val64 = 0x0001020000010001ULL;
1245                 writeq(val64, &bar0->tx_w_round_robin_1);
1246                 val64 = 0x0200000100010200ULL;
1247                 writeq(val64, &bar0->tx_w_round_robin_2);
1248                 val64 = 0x0001000102000001ULL;
1249                 writeq(val64, &bar0->tx_w_round_robin_3);
1250                 val64 = 0x0001020000000000ULL;
1251                 writeq(val64, &bar0->tx_w_round_robin_4);
1252                 break;
1253         case 4:
1254                 val64 = 0x0001020300010200ULL;
1255                 writeq(val64, &bar0->tx_w_round_robin_0);
1256                 val64 = 0x0100000102030001ULL;
1257                 writeq(val64, &bar0->tx_w_round_robin_1);
1258                 val64 = 0x0200010000010203ULL;
1259                 writeq(val64, &bar0->tx_w_round_robin_2);
1260                 val64 = 0x0001020001000001ULL;
1261                 writeq(val64, &bar0->tx_w_round_robin_3);
1262                 val64 = 0x0203000100000000ULL;
1263                 writeq(val64, &bar0->tx_w_round_robin_4);
1264                 break;
1265         case 5:
1266                 val64 = 0x0001000203000102ULL;
1267                 writeq(val64, &bar0->tx_w_round_robin_0);
1268                 val64 = 0x0001020001030004ULL;
1269                 writeq(val64, &bar0->tx_w_round_robin_1);
1270                 val64 = 0x0001000203000102ULL;
1271                 writeq(val64, &bar0->tx_w_round_robin_2);
1272                 val64 = 0x0001020001030004ULL;
1273                 writeq(val64, &bar0->tx_w_round_robin_3);
1274                 val64 = 0x0001000000000000ULL;
1275                 writeq(val64, &bar0->tx_w_round_robin_4);
1276                 break;
1277         case 6:
1278                 val64 = 0x0001020304000102ULL;
1279                 writeq(val64, &bar0->tx_w_round_robin_0);
1280                 val64 = 0x0304050001020001ULL;
1281                 writeq(val64, &bar0->tx_w_round_robin_1);
1282                 val64 = 0x0203000100000102ULL;
1283                 writeq(val64, &bar0->tx_w_round_robin_2);
1284                 val64 = 0x0304000102030405ULL;
1285                 writeq(val64, &bar0->tx_w_round_robin_3);
1286                 val64 = 0x0001000200000000ULL;
1287                 writeq(val64, &bar0->tx_w_round_robin_4);
1288                 break;
1289         case 7:
1290                 val64 = 0x0001020001020300ULL;
1291                 writeq(val64, &bar0->tx_w_round_robin_0);
1292                 val64 = 0x0102030400010203ULL;
1293                 writeq(val64, &bar0->tx_w_round_robin_1);
1294                 val64 = 0x0405060001020001ULL;
1295                 writeq(val64, &bar0->tx_w_round_robin_2);
1296                 val64 = 0x0304050000010200ULL;
1297                 writeq(val64, &bar0->tx_w_round_robin_3);
1298                 val64 = 0x0102030000000000ULL;
1299                 writeq(val64, &bar0->tx_w_round_robin_4);
1300                 break;
1301         case 8:
1302                 val64 = 0x0001020300040105ULL;
1303                 writeq(val64, &bar0->tx_w_round_robin_0);
1304                 val64 = 0x0200030106000204ULL;
1305                 writeq(val64, &bar0->tx_w_round_robin_1);
1306                 val64 = 0x0103000502010007ULL;
1307                 writeq(val64, &bar0->tx_w_round_robin_2);
1308                 val64 = 0x0304010002060500ULL;
1309                 writeq(val64, &bar0->tx_w_round_robin_3);
1310                 val64 = 0x0103020400000000ULL;
1311                 writeq(val64, &bar0->tx_w_round_robin_4);
1312                 break;
1313         }
1314
1315         /* Enable all configured Tx FIFO partitions */
1316         val64 = readq(&bar0->tx_fifo_partition_0);
1317         val64 |= (TX_FIFO_PARTITION_EN);
1318         writeq(val64, &bar0->tx_fifo_partition_0);
1319
1320         /* Filling the Rx round robin registers as per the
1321          * number of Rings and steering based on QoS.
1322          */
1323         switch (config->rx_ring_num) {
1324         case 1:
1325                 val64 = 0x8080808080808080ULL;
1326                 writeq(val64, &bar0->rts_qos_steering);
1327                 break;
1328         case 2:
1329                 val64 = 0x0000010000010000ULL;
1330                 writeq(val64, &bar0->rx_w_round_robin_0);
1331                 val64 = 0x0100000100000100ULL;
1332                 writeq(val64, &bar0->rx_w_round_robin_1);
1333                 val64 = 0x0001000001000001ULL;
1334                 writeq(val64, &bar0->rx_w_round_robin_2);
1335                 val64 = 0x0000010000010000ULL;
1336                 writeq(val64, &bar0->rx_w_round_robin_3);
1337                 val64 = 0x0100000000000000ULL;
1338                 writeq(val64, &bar0->rx_w_round_robin_4);
1339
1340                 val64 = 0x8080808040404040ULL;
1341                 writeq(val64, &bar0->rts_qos_steering);
1342                 break;
1343         case 3:
1344                 val64 = 0x0001000102000001ULL;
1345                 writeq(val64, &bar0->rx_w_round_robin_0);
1346                 val64 = 0x0001020000010001ULL;
1347                 writeq(val64, &bar0->rx_w_round_robin_1);
1348                 val64 = 0x0200000100010200ULL;
1349                 writeq(val64, &bar0->rx_w_round_robin_2);
1350                 val64 = 0x0001000102000001ULL;
1351                 writeq(val64, &bar0->rx_w_round_robin_3);
1352                 val64 = 0x0001020000000000ULL;
1353                 writeq(val64, &bar0->rx_w_round_robin_4);
1354
1355                 val64 = 0x8080804040402020ULL;
1356                 writeq(val64, &bar0->rts_qos_steering);
1357                 break;
1358         case 4:
1359                 val64 = 0x0001020300010200ULL;
1360                 writeq(val64, &bar0->rx_w_round_robin_0);
1361                 val64 = 0x0100000102030001ULL;
1362                 writeq(val64, &bar0->rx_w_round_robin_1);
1363                 val64 = 0x0200010000010203ULL;
1364                 writeq(val64, &bar0->rx_w_round_robin_2);
1365                 val64 = 0x0001020001000001ULL;
1366                 writeq(val64, &bar0->rx_w_round_robin_3);
1367                 val64 = 0x0203000100000000ULL;
1368                 writeq(val64, &bar0->rx_w_round_robin_4);
1369
1370                 val64 = 0x8080404020201010ULL;
1371                 writeq(val64, &bar0->rts_qos_steering);
1372                 break;
1373         case 5:
1374                 val64 = 0x0001000203000102ULL;
1375                 writeq(val64, &bar0->rx_w_round_robin_0);
1376                 val64 = 0x0001020001030004ULL;
1377                 writeq(val64, &bar0->rx_w_round_robin_1);
1378                 val64 = 0x0001000203000102ULL;
1379                 writeq(val64, &bar0->rx_w_round_robin_2);
1380                 val64 = 0x0001020001030004ULL;
1381                 writeq(val64, &bar0->rx_w_round_robin_3);
1382                 val64 = 0x0001000000000000ULL;
1383                 writeq(val64, &bar0->rx_w_round_robin_4);
1384
1385                 val64 = 0x8080404020201008ULL;
1386                 writeq(val64, &bar0->rts_qos_steering);
1387                 break;
1388         case 6:
1389                 val64 = 0x0001020304000102ULL;
1390                 writeq(val64, &bar0->rx_w_round_robin_0);
1391                 val64 = 0x0304050001020001ULL;
1392                 writeq(val64, &bar0->rx_w_round_robin_1);
1393                 val64 = 0x0203000100000102ULL;
1394                 writeq(val64, &bar0->rx_w_round_robin_2);
1395                 val64 = 0x0304000102030405ULL;
1396                 writeq(val64, &bar0->rx_w_round_robin_3);
1397                 val64 = 0x0001000200000000ULL;
1398                 writeq(val64, &bar0->rx_w_round_robin_4);
1399
1400                 val64 = 0x8080404020100804ULL;
1401                 writeq(val64, &bar0->rts_qos_steering);
1402                 break;
1403         case 7:
1404                 val64 = 0x0001020001020300ULL;
1405                 writeq(val64, &bar0->rx_w_round_robin_0);
1406                 val64 = 0x0102030400010203ULL;
1407                 writeq(val64, &bar0->rx_w_round_robin_1);
1408                 val64 = 0x0405060001020001ULL;
1409                 writeq(val64, &bar0->rx_w_round_robin_2);
1410                 val64 = 0x0304050000010200ULL;
1411                 writeq(val64, &bar0->rx_w_round_robin_3);
1412                 val64 = 0x0102030000000000ULL;
1413                 writeq(val64, &bar0->rx_w_round_robin_4);
1414
1415                 val64 = 0x8080402010080402ULL;
1416                 writeq(val64, &bar0->rts_qos_steering);
1417                 break;
1418         case 8:
1419                 val64 = 0x0001020300040105ULL;
1420                 writeq(val64, &bar0->rx_w_round_robin_0);
1421                 val64 = 0x0200030106000204ULL;
1422                 writeq(val64, &bar0->rx_w_round_robin_1);
1423                 val64 = 0x0103000502010007ULL;
1424                 writeq(val64, &bar0->rx_w_round_robin_2);
1425                 val64 = 0x0304010002060500ULL;
1426                 writeq(val64, &bar0->rx_w_round_robin_3);
1427                 val64 = 0x0103020400000000ULL;
1428                 writeq(val64, &bar0->rx_w_round_robin_4);
1429
1430                 val64 = 0x8040201008040201ULL;
1431                 writeq(val64, &bar0->rts_qos_steering);
1432                 break;
1433         }
1434
1435         /* UDP Fix */
1436         val64 = 0;
1437         for (i = 0; i < 8; i++)
1438                 writeq(val64, &bar0->rts_frm_len_n[i]);
1439
1440         /* Set the default rts frame length for the rings configured */
1441         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1442         for (i = 0 ; i < config->rx_ring_num ; i++)
1443                 writeq(val64, &bar0->rts_frm_len_n[i]);
1444
1445         /* Set the frame length for the configured rings
1446          * desired by the user
1447          */
1448         for (i = 0; i < config->rx_ring_num; i++) {
1449                 /* If rts_frm_len[i] == 0 then it is assumed that the user
1450                  * has not specified frame length steering.
1451                  * If the user provides the frame length then program
1452                  * the rts_frm_len register for those values or else
1453                  * leave it as it is.
1454                  */
1455                 if (rts_frm_len[i] != 0) {
1456                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1457                                 &bar0->rts_frm_len_n[i]);
1458                 }
1459         }
1460         
1461         /* Disable differentiated services steering logic */
1462         for (i = 0; i < 64; i++) {
1463                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1464                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1465                                 dev->name);
1466                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1467                         return FAILURE;
1468                 }
1469         }
1470
1471         /* Program statistics memory */
1472         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1473
1474         if (nic->device_type == XFRAME_II_DEVICE) {
1475                 val64 = STAT_BC(0x320);
1476                 writeq(val64, &bar0->stat_byte_cnt);
1477         }
1478
1479         /*
1480          * Initializing the sampling rate for the device to calculate the
1481          * bandwidth utilization.
1482          */
1483         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1484             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1485         writeq(val64, &bar0->mac_link_util);
1486
1487
1488         /*
1489          * Initializing the Transmit and Receive Traffic Interrupt
1490          * Scheme.
1491          */
1492         /*
1493          * TTI Initialization. Default Tx timer gets us about
1494          * 250 interrupts per sec. Continuous interrupts are enabled
1495          * by default.
1496          */
1497         if (nic->device_type == XFRAME_II_DEVICE) {
1498                 int count = (nic->config.bus_speed * 125)/2;
1499                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1500         } else {
1501
1502                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1503         }
1504         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1505             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1506             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1507                 if (use_continuous_tx_intrs)
1508                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1509         writeq(val64, &bar0->tti_data1_mem);
1510
1511         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1512             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1513             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1514         writeq(val64, &bar0->tti_data2_mem);
1515
1516         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1517         writeq(val64, &bar0->tti_command_mem);
1518
1519         /*
1520          * Once the operation completes, the Strobe bit of the command
1521          * register will be reset. We poll for this particular condition
1522          * We wait for a maximum of 500ms for the operation to complete,
1523          * if it's not complete by then we return error.
1524          */
1525         time = 0;
1526         while (TRUE) {
1527                 val64 = readq(&bar0->tti_command_mem);
1528                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1529                         break;
1530                 }
1531                 if (time > 10) {
1532                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1533                                   dev->name);
1534                         return -1;
1535                 }
1536                 msleep(50);
1537                 time++;
1538         }
1539
1540         if (nic->config.bimodal) {
1541                 int k = 0;
1542                 for (k = 0; k < config->rx_ring_num; k++) {
1543                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1544                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1545                         writeq(val64, &bar0->tti_command_mem);
1546
1547                 /*
1548                  * Once the operation completes, the Strobe bit of the command
1549                  * register will be reset. We poll for this particular condition
1550                  * We wait for a maximum of 500ms for the operation to complete,
1551                  * if it's not complete by then we return error.
1552                 */
1553                         time = 0;
1554                         while (TRUE) {
1555                                 val64 = readq(&bar0->tti_command_mem);
1556                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1557                                         break;
1558                                 }
1559                                 if (time > 10) {
1560                                         DBG_PRINT(ERR_DBG,
1561                                                 "%s: TTI init Failed\n",
1562                                         dev->name);
1563                                         return -1;
1564                                 }
1565                                 time++;
1566                                 msleep(50);
1567                         }
1568                 }
1569         } else {
1570
1571                 /* RTI Initialization */
1572                 if (nic->device_type == XFRAME_II_DEVICE) {
1573                         /*
1574                          * Programmed to generate Apprx 500 Intrs per
1575                          * second
1576                          */
1577                         int count = (nic->config.bus_speed * 125)/4;
1578                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1579                 } else {
1580                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1581                 }
1582                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1583                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1584                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1585
1586                 writeq(val64, &bar0->rti_data1_mem);
1587
1588                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1589                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1590                 if (nic->intr_type == MSI_X)
1591                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1592                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1593                 else
1594                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1595                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1596                 writeq(val64, &bar0->rti_data2_mem);
1597
1598                 for (i = 0; i < config->rx_ring_num; i++) {
1599                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1600                                         | RTI_CMD_MEM_OFFSET(i);
1601                         writeq(val64, &bar0->rti_command_mem);
1602
1603                         /*
1604                          * Once the operation completes, the Strobe bit of the
1605                          * command register will be reset. We poll for this
1606                          * particular condition. We wait for a maximum of 500ms
1607                          * for the operation to complete, if it's not complete
1608                          * by then we return error.
1609                          */
1610                         time = 0;
1611                         while (TRUE) {
1612                                 val64 = readq(&bar0->rti_command_mem);
1613                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1614                                         break;
1615                                 }
1616                                 if (time > 10) {
1617                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1618                                                   dev->name);
1619                                         return -1;
1620                                 }
1621                                 time++;
1622                                 msleep(50);
1623                         }
1624                 }
1625         }
1626
1627         /*
1628          * Initializing proper values as Pause threshold into all
1629          * the 8 Queues on Rx side.
1630          */
1631         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1632         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1633
1634         /* Disable RMAC PAD STRIPPING */
1635         add = &bar0->mac_cfg;
1636         val64 = readq(&bar0->mac_cfg);
1637         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1638         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1639         writel((u32) (val64), add);
1640         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1641         writel((u32) (val64 >> 32), (add + 4));
1642         val64 = readq(&bar0->mac_cfg);
1643
1644         /* Enable FCS stripping by adapter */
1645         add = &bar0->mac_cfg;
1646         val64 = readq(&bar0->mac_cfg);
1647         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1648         if (nic->device_type == XFRAME_II_DEVICE)
1649                 writeq(val64, &bar0->mac_cfg);
1650         else {
1651                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1652                 writel((u32) (val64), add);
1653                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1654                 writel((u32) (val64 >> 32), (add + 4));
1655         }
1656
1657         /*
1658          * Set the time value to be inserted in the pause frame
1659          * generated by xena.
1660          */
1661         val64 = readq(&bar0->rmac_pause_cfg);
1662         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1663         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1664         writeq(val64, &bar0->rmac_pause_cfg);
1665
1666         /*
1667          * Set the Threshold Limit for Generating the pause frame
1668          * If the amount of data in any Queue exceeds ratio of
1669          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1670          * pause frame is generated
1671          */
1672         val64 = 0;
1673         for (i = 0; i < 4; i++) {
1674                 val64 |=
1675                     (((u64) 0xFF00 | nic->mac_control.
1676                       mc_pause_threshold_q0q3)
1677                      << (i * 2 * 8));
1678         }
1679         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1680
1681         val64 = 0;
1682         for (i = 0; i < 4; i++) {
1683                 val64 |=
1684                     (((u64) 0xFF00 | nic->mac_control.
1685                       mc_pause_threshold_q4q7)
1686                      << (i * 2 * 8));
1687         }
1688         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1689
1690         /*
1691          * TxDMA will stop Read request if the number of read split has
1692          * exceeded the limit pointed by shared_splits
1693          */
1694         val64 = readq(&bar0->pic_control);
1695         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1696         writeq(val64, &bar0->pic_control);
1697
1698         if (nic->config.bus_speed == 266) {
1699                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1700                 writeq(0x0, &bar0->read_retry_delay);
1701                 writeq(0x0, &bar0->write_retry_delay);
1702         }
1703
1704         /*
1705          * Programming the Herc to split every write transaction
1706          * that does not start on an ADB to reduce disconnects.
1707          */
1708         if (nic->device_type == XFRAME_II_DEVICE) {
1709                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1710                         MISC_LINK_STABILITY_PRD(3);
1711                 writeq(val64, &bar0->misc_control);
1712                 val64 = readq(&bar0->pic_control2);
1713                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1714                 writeq(val64, &bar0->pic_control2);
1715         }
1716         if (strstr(nic->product_name, "CX4")) {
1717                 val64 = TMAC_AVG_IPG(0x17);
1718                 writeq(val64, &bar0->tmac_avg_ipg);
1719         }
1720
1721         return SUCCESS;
1722 }
1723 #define LINK_UP_DOWN_INTERRUPT          1
1724 #define MAC_RMAC_ERR_TIMER              2
1725
1726 static int s2io_link_fault_indication(struct s2io_nic *nic)
1727 {
1728         if (nic->intr_type != INTA)
1729                 return MAC_RMAC_ERR_TIMER;
1730         if (nic->device_type == XFRAME_II_DEVICE)
1731                 return LINK_UP_DOWN_INTERRUPT;
1732         else
1733                 return MAC_RMAC_ERR_TIMER;
1734 }
1735 /**
1736  *  do_s2io_write_bits -  update alarm bits in alarm register
1737  *  @value: alarm bits
1738  *  @flag: interrupt status
1739  *  @addr: address value
1740  *  Description: update alarm bits in alarm register
1741  *  Return Value:
1742  *  NONE.
1743  */
1744 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1745 {
1746         u64 temp64;
1747
1748         temp64 = readq(addr);
1749
1750         if(flag == ENABLE_INTRS)
1751                 temp64 &= ~((u64) value);
1752         else
1753                 temp64 |= ((u64) value);
1754         writeq(temp64, addr);
1755 }
1756
/**
 *  en_dis_err_alarms - mask or unmask the error/alarm interrupt sources
 *  @nic: device private variable
 *  @mask: bitmap (TX_DMA_INTR, RX_MAC_INTR, ...) selecting which error
 *         blocks to act on
 *  @flag: ENABLE_INTRS clears the mask bits (unmasks the alarms); any
 *         other value sets them (masks the alarms) - see
 *         do_s2io_write_bits()
 *  Description: for every block selected in @mask, updates that block's
 *  error-mask registers and accumulates the block's bit into the general
 *  interrupt mask cached in @nic.
 *  Return Value: NONE.
 */
1757 void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1758 {
1759         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1760         register u64 gen_int_mask = 0;
1761
/* Tx DMA subsystem alarms: TDA, PFC, PCC, TTI, LSO, TPA and SM blocks. */
1762         if (mask & TX_DMA_INTR) {
1763
1764                 gen_int_mask |= TXDMA_INT_M;
1765
1766                 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1767                                 TXDMA_PCC_INT | TXDMA_TTI_INT |
1768                                 TXDMA_LSO_INT | TXDMA_TPA_INT |
1769                                 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1770
1771                 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1772                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1773                                 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1774                                 &bar0->pfc_err_mask);
1775
1776                 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1777                                 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1778                                 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1779
1780                 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1781                                 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1782                                 PCC_N_SERR | PCC_6_COF_OV_ERR |
1783                                 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1784                                 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1785                                 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1786
1787                 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1788                                 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1789
1790                 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1791                                 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1792                                 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1793                                 flag, &bar0->lso_err_mask);
1794
1795                 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1796                                 flag, &bar0->tpa_err_mask);
1797
1798                 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1799
1800         }
1801
1802         if (mask & TX_MAC_INTR) {
1803                 gen_int_mask |= TXMAC_INT_M;
1804                 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1805                                 &bar0->mac_int_mask);
1806                 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1807                                 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1808                                 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1809                                 flag, &bar0->mac_tmac_err_mask);
1810         }
1811
1812         if (mask & TX_XGXS_INTR) {
1813                 gen_int_mask |= TXXGXS_INT_M;
1814                 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1815                                 &bar0->xgxs_int_mask);
1816                 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1817                                 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1818                                 flag, &bar0->xgxs_txgxs_err_mask);
1819         }
1820
/* Rx DMA subsystem alarms: RC, PRC, RPA, RDA and RTI blocks. */
1821         if (mask & RX_DMA_INTR) {
1822                 gen_int_mask |= RXDMA_INT_M;
1823                 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1824                                 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1825                                 flag, &bar0->rxdma_int_mask);
1826                 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1827                                 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1828                                 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1829                                 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1830                 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1831                                 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1832                                 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1833                                 &bar0->prc_pcix_err_mask);
1834                 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1835                                 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1836                                 &bar0->rpa_err_mask);
1837                 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1838                                 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1839                                 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1840                                 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
1841                                 flag, &bar0->rda_err_mask);
1842                 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1843                                 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1844                                 flag, &bar0->rti_err_mask);
1845         }
1846
1847         if (mask & RX_MAC_INTR) {
1848                 gen_int_mask |= RXMAC_INT_M;
1849                 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1850                                 &bar0->mac_int_mask);
1851                 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1852                                 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1853                                 RMAC_DOUBLE_ECC_ERR |
1854                                 RMAC_LINK_STATE_CHANGE_INT,
1855                                 flag, &bar0->mac_rmac_err_mask);
1856         }
1857
1858         if (mask & RX_XGXS_INTR)
1859         {
1860                 gen_int_mask |= RXXGXS_INT_M;
1861                 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1862                                 &bar0->xgxs_int_mask);
1863                 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1864                                 &bar0->xgxs_rxgxs_err_mask);
1865         }
1866
1867         if (mask & MC_INTR) {
1868                 gen_int_mask |= MC_INT_M;
1869                 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
1870                 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1871                                 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1872                                 &bar0->mc_err_mask);
1873         }
/* Cache the accumulated top-level bits for en_dis_able_nic_intrs(). */
1874         nic->general_int_mask = gen_int_mask;
1875
1876         /* Remove this line when alarm interrupts are enabled */
1877         nic->general_int_mask = 0;
1878 }
1879 /**
1880  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1881  *  @nic: device private variable,
1882  *  @mask: A mask indicating which Intr block must be modified and,
1883  *  @flag: A flag indicating whether to enable or disable the Intrs.
1884  *  Description: This function will either disable or enable the interrupts
1885  *  depending on the flag argument. The mask argument can be used to
1886  *  enable/disable any Intr block.
1887  *  Return Value: NONE.
1888  */
1889
1890 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1891 {
1892         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1893         register u64 temp64 = 0, intr_mask = 0;
1894
1895         intr_mask = nic->general_int_mask;
1896
1897         /*  Top level interrupt classification */
1898         /*  PIC Interrupts */
1899         if (mask & TX_PIC_INTR) {
1900                 /*  Enable PIC Intrs in the general intr mask register */
1901                 intr_mask |= TXPIC_INT_M;
1902                 if (flag == ENABLE_INTRS) {
1903                         /*
1904                          * If Hercules adapter enable GPIO otherwise
1905                          * disable all PCIX, Flash, MDIO, IIC and GPIO
1906                          * interrupts for now.
1907                          * TODO
1908                          */
1909                         if (s2io_link_fault_indication(nic) ==
1910                                         LINK_UP_DOWN_INTERRUPT ) {
1911                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
1912                                                 &bar0->pic_int_mask);
1913                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1914                                                 &bar0->gpio_int_mask);
1915                         } else
1916                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1917                 } else if (flag == DISABLE_INTRS) {
1918                         /*
1919                          * Disable PIC Intrs in the general
1920                          * intr mask register
1921                          */
1922                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1923                 }
1924         }
1925
1926         /*  Tx traffic interrupts */
1927         if (mask & TX_TRAFFIC_INTR) {
1928                 intr_mask |= TXTRAFFIC_INT_M;
1929                 if (flag == ENABLE_INTRS) {
1930                         /*
1931                          * Enable all the Tx side interrupts
1932                          * writing 0 Enables all 64 TX interrupt levels
1933                          */
1934                         writeq(0x0, &bar0->tx_traffic_mask);
1935                 } else if (flag == DISABLE_INTRS) {
1936                         /*
1937                          * Disable Tx Traffic Intrs in the general intr mask
1938                          * register.
1939                          */
1940                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1941                 }
1942         }
1943
1944         /*  Rx traffic interrupts */
1945         if (mask & RX_TRAFFIC_INTR) {
1946                 intr_mask |= RXTRAFFIC_INT_M;
1947                 if (flag == ENABLE_INTRS) {
1948                         /* writing 0 Enables all 8 RX interrupt levels */
1949                         writeq(0x0, &bar0->rx_traffic_mask);
1950                 } else if (flag == DISABLE_INTRS) {
1951                         /*
1952                          * Disable Rx Traffic Intrs in the general intr mask
1953                          * register.
1954                          */
1955                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1956                 }
1957         }
1958
1959         temp64 = readq(&bar0->general_int_mask);
1960         if (flag == ENABLE_INTRS)
1961                 temp64 &= ~((u64) intr_mask);
1962         else
1963                 temp64 = DISABLE_ALL_INTRS;
1964         writeq(temp64, &bar0->general_int_mask);
1965
1966         nic->general_int_mask = readq(&bar0->general_int_mask);
1967 }
1968
1969 /**
1970  *  verify_pcc_quiescent- Checks for PCC quiescent state
1971  *  Return: 1 If PCC is quiescence
1972  *          0 If PCC is not quiescence
1973  */
1974 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1975 {
1976         int ret = 0, herc;
1977         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1978         u64 val64 = readq(&bar0->adapter_status);
1979         
1980         herc = (sp->device_type == XFRAME_II_DEVICE);
1981
1982         if (flag == FALSE) {
1983                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1984                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1985                                 ret = 1;
1986                 } else {
1987                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1988                                 ret = 1;
1989                 }
1990         } else {
1991                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1992                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1993                              ADAPTER_STATUS_RMAC_PCC_IDLE))
1994                                 ret = 1;
1995                 } else {
1996                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1997                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1998                                 ret = 1;
1999                 }
2000         }
2001
2002         return ret;
2003 }
2004 /**
2005  *  verify_xena_quiescence - Checks whether the H/W is ready
2006  *  Description: Returns whether the H/W is ready to go or not. Depending
2007  *  on whether adapter enable bit was written or not the comparison
2008  *  differs and the calling function passes the input argument flag to
2009  *  indicate this.
2010  *  Return: 1 If xena is quiescence
2011  *          0 If Xena is not quiescence
2012  */
2013
2014 static int verify_xena_quiescence(struct s2io_nic *sp)
2015 {
2016         int  mode;
2017         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2018         u64 val64 = readq(&bar0->adapter_status);
2019         mode = s2io_verify_pci_mode(sp);
2020
2021         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2022                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2023                 return 0;
2024         }
2025         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2026         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2027                 return 0;
2028         }
2029         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2030                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2031                 return 0;
2032         }
2033         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2034                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2035                 return 0;
2036         }
2037         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2038                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2039                 return 0;
2040         }
2041         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2042                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2043                 return 0;
2044         }
2045         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2046                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2047                 return 0;
2048         }
2049         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2050                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2051                 return 0;
2052         }
2053
2054         /*
2055          * In PCI 33 mode, the P_PLL is not used, and therefore,
2056          * the the P_PLL_LOCK bit in the adapter_status register will
2057          * not be asserted.
2058          */
2059         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2060                 sp->device_type == XFRAME_II_DEVICE && mode !=
2061                 PCI_MODE_PCI_33) {
2062                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2063                 return 0;
2064         }
2065         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2066                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2067                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2068                 return 0;
2069         }
2070         return 1;
2071 }
2072
2073 /**
2074  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2075  * @sp: Pointer to device specifc structure
2076  * Description :
2077  * New procedure to clear mac address reading  problems on Alpha platforms
2078  *
2079  */
2080
2081 static void fix_mac_address(struct s2io_nic * sp)
2082 {
2083         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2084         u64 val64;
2085         int i = 0;
2086
2087         while (fix_mac[i] != END_SIGN) {
2088                 writeq(fix_mac[i++], &bar0->gpio_control);
2089                 udelay(10);
2090                 val64 = readq(&bar0->gpio_control);
2091         }
2092 }
2093
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and FAILURE (-1) on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each ring's PRC at the DMA address of its first
		 * receive block. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Clear the backoff-interval field and set it to 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Honour the module parameter: leave VLAN tags in frames. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the comment says "enabling" but the code clears
	 * ADAPTER_ECC_EN -- presumably the bit is active-low or the macro
	 * name is misleading; confirm against the Xframe register manual
	 * before touching this. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic write at offset 0x2700: undocumented LED init
		 * sequence from the SXE-002 errata -- do not change. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2214 /**
2215  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2216  */
2217 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2218                                         TxD *txdlp, int get_off)
2219 {
2220         struct s2io_nic *nic = fifo_data->nic;
2221         struct sk_buff *skb;
2222         struct TxD *txds;
2223         u16 j, frg_cnt;
2224
2225         txds = txdlp;
2226         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2227                 pci_unmap_single(nic->pdev, (dma_addr_t)
2228                         txds->Buffer_Pointer, sizeof(u64),
2229                         PCI_DMA_TODEVICE);
2230                 txds++;
2231         }
2232
2233         skb = (struct sk_buff *) ((unsigned long)
2234                         txds->Host_Control);
2235         if (!skb) {
2236                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2237                 return NULL;
2238         }
2239         pci_unmap_single(nic->pdev, (dma_addr_t)
2240                          txds->Buffer_Pointer,
2241                          skb->len - skb->data_len,
2242                          PCI_DMA_TODEVICE);
2243         frg_cnt = skb_shinfo(skb)->nr_frags;
2244         if (frg_cnt) {
2245                 txds++;
2246                 for (j = 0; j < frg_cnt; j++, txds++) {
2247                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2248                         if (!txds->Buffer_Pointer)
2249                                 break;
2250                         pci_unmap_page(nic->pdev, (dma_addr_t)
2251                                         txds->Buffer_Pointer,
2252                                        frag->size, PCI_DMA_TODEVICE);
2253                 }
2254         }
2255         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2256         return(skb);
2257 }
2258
2259 /**
2260  *  free_tx_buffers - Free all queued Tx buffers
2261  *  @nic : device private variable.
2262  *  Description:
2263  *  Free all queued Tx buffers.
2264  *  Return Value: void
2265 */
2266
2267 static void free_tx_buffers(struct s2io_nic *nic)
2268 {
2269         struct net_device *dev = nic->dev;
2270         struct sk_buff *skb;
2271         struct TxD *txdp;
2272         int i, j;
2273         struct mac_info *mac_control;
2274         struct config_param *config;
2275         int cnt = 0;
2276
2277         mac_control = &nic->mac_control;
2278         config = &nic->config;
2279
2280         for (i = 0; i < config->tx_fifo_num; i++) {
2281                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2282                         txdp = (struct TxD *) \
2283                         mac_control->fifos[i].list_info[j].list_virt_addr;
2284                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2285                         if (skb) {
2286                                 nic->mac_control.stats_info->sw_stat.mem_freed 
2287                                         += skb->truesize;
2288                                 dev_kfree_skb(skb);
2289                                 cnt++;
2290                         }
2291                 }
2292                 DBG_PRINT(INTR_DBG,
2293                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2294                           dev->name, cnt, i);
2295                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2296                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2297         }
2298 }
2299
2300 /**
2301  *   stop_nic -  To stop the nic
2302  *   @nic ; device private variable.
2303  *   Description:
2304  *   This function does exactly the opposite of what the start_nic()
2305  *   function does. This function is called to stop the device.
2306  *   Return Value:
2307  *   void.
2308  */
2309
2310 static void stop_nic(struct s2io_nic *nic)
2311 {
2312         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2313         register u64 val64 = 0;
2314         u16 interruptible;
2315         struct mac_info *mac_control;
2316         struct config_param *config;
2317
2318         mac_control = &nic->mac_control;
2319         config = &nic->config;
2320
2321         /*  Disable all interrupts */
2322         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2323         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2324         interruptible |= TX_PIC_INTR;
2325         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2326
2327         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2328         val64 = readq(&bar0->adapter_control);
2329         val64 &= ~(ADAPTER_CNTL_EN);
2330         writeq(val64, &bar0->adapter_control);
2331 }
2332
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic:  device private variable
 *  @ring_no: ring number
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *  NOTE(review): the body below actually handles RXD_MODE_1 (single
 *  buffer) and RXD_MODE_3B (two buffer); the paragraph above appears to
 *  predate that and should be reconciled with the code.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;	/* descriptors replenished so far */
	u32 alloc_cnt;		/* descriptors that need replenishing */
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;	/* ownership handed over last, see below */
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &nic->mac_control.stats_info->sw_stat;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Refill only what the hardware has consumed. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	/* Snapshot of the get pointer, to detect catching up with it. */
	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer has wrapped around onto an RxD that still
		 * holds an skb: the ring is full, stop here. */
		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of the current block reached: advance (and possibly
		 * wrap) the block index and restart at offset 0. */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		/* Publish the put position; the lock is only needed when
		 * the interrupt path (non-NAPI) reads it concurrently. */
		if(!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		/* Descriptor still owned by the adapter (and, in 3B mode,
		 * flagged as fully set up): nothing more to do this pass.
		 * NOTE(review): BIT(0) in Control_2 looks like a software
		 * "buffers attached" marker set further down -- confirm. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand over whatever was staged before bailing out. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			nic->mac_control.stats_info->sw_stat. \
				mem_alloc_fail_cnt++;
			return -ENOMEM ;
		}
		nic->mac_control.stats_info->sw_stat.mem_allocated
			+= skb->truesize;
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data to the next ALIGN_SIZE boundary. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0: map once, then only sync on reuse. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			/* NOTE(review): this condition is always true inside
			 * the enclosing RXD_MODE_3B branch; likely a leftover
			 * from a removed third mode. */
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				rxdp3->Buffer1_ptr =
						pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				if( (rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping before
					 * reporting the failure. */
					pci_unmap_single
						(nic->pdev,
						(dma_addr_t)rxdp3->Buffer2_ptr,
						dev->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			}
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Hand ownership over immediately, except on every
		 * 2^rxsync_frequency-th descriptor, whose handover is
		 * deferred (first_rxdp) until after a write barrier. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2577
2578 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2579 {
2580         struct net_device *dev = sp->dev;
2581         int j;
2582         struct sk_buff *skb;
2583         struct RxD_t *rxdp;
2584         struct mac_info *mac_control;
2585         struct buffAdd *ba;
2586         struct RxD1 *rxdp1;
2587         struct RxD3 *rxdp3;
2588
2589         mac_control = &sp->mac_control;
2590         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2591                 rxdp = mac_control->rings[ring_no].
2592                                 rx_blocks[blk].rxds[j].virt_addr;
2593                 skb = (struct sk_buff *)
2594                         ((unsigned long) rxdp->Host_Control);
2595                 if (!skb) {
2596                         continue;
2597                 }
2598                 if (sp->rxd_mode == RXD_MODE_1) {
2599                         rxdp1 = (struct RxD1*)rxdp;
2600                         pci_unmap_single(sp->pdev, (dma_addr_t)
2601                                 rxdp1->Buffer0_ptr,
2602                                 dev->mtu +
2603                                 HEADER_ETHERNET_II_802_3_SIZE
2604                                 + HEADER_802_2_SIZE +
2605                                 HEADER_SNAP_SIZE,
2606                                 PCI_DMA_FROMDEVICE);
2607                         memset(rxdp, 0, sizeof(struct RxD1));
2608                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2609                         rxdp3 = (struct RxD3*)rxdp;
2610                         ba = &mac_control->rings[ring_no].
2611                                 ba[blk][j];
2612                         pci_unmap_single(sp->pdev, (dma_addr_t)
2613                                 rxdp3->Buffer0_ptr,
2614                                 BUF0_LEN,
2615                                 PCI_DMA_FROMDEVICE);
2616                         pci_unmap_single(sp->pdev, (dma_addr_t)
2617                                 rxdp3->Buffer1_ptr,
2618                                 BUF1_LEN,
2619                                 PCI_DMA_FROMDEVICE);
2620                         pci_unmap_single(sp->pdev, (dma_addr_t)
2621                                 rxdp3->Buffer2_ptr,
2622                                 dev->mtu + 4,
2623                                 PCI_DMA_FROMDEVICE);
2624                         memset(rxdp, 0, sizeof(struct RxD3));
2625                 }
2626                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2627                 dev_kfree_skb(skb);
2628                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2629         }
2630 }
2631
2632 /**
2633  *  free_rx_buffers - Frees all Rx buffers
2634  *  @sp: device private variable.
2635  *  Description:
2636  *  This function will free all Rx buffers allocated by host.
2637  *  Return Value:
2638  *  NONE.
2639  */
2640
2641 static void free_rx_buffers(struct s2io_nic *sp)
2642 {
2643         struct net_device *dev = sp->dev;
2644         int i, blk = 0, buf_cnt = 0;
2645         struct mac_info *mac_control;
2646         struct config_param *config;
2647
2648         mac_control = &sp->mac_control;
2649         config = &sp->config;
2650
2651         for (i = 0; i < config->rx_ring_num; i++) {
2652                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2653                         free_rxd_blk(sp,i,blk);
2654
2655                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2656                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2657                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2658                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2659                 atomic_set(&sp->rx_bufs_left[i], 0);
2660                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2661                           dev->name, buf_cnt, i);
2662         }
2663 }
2664
2665 /**
2666  * s2io_poll - Rx interrupt handler for NAPI support
2667  * @napi : pointer to the napi structure.
2668  * @budget : The number of packets that were budgeted to be processed
2669  * during  one pass through the 'Poll" function.
2670  * Description:
2671  * Comes into picture only if NAPI support has been incorporated. It does
2672  * the same thing that rx_intr_handler does, but not in a interrupt context
2673  * also It will process only a given number of packets.
2674  * Return value:
2675  * 0 on success and 1 if there are No Rx packets to be processed.
2676  */
2677
2678 static int s2io_poll(struct napi_struct *napi, int budget)
2679 {
2680         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2681         struct net_device *dev = nic->dev;
2682         int pkt_cnt = 0, org_pkts_to_process;
2683         struct mac_info *mac_control;
2684         struct config_param *config;
2685         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2686         int i;
2687
2688         atomic_inc(&nic->isr_cnt);
2689         mac_control = &nic->mac_control;
2690         config = &nic->config;
2691
2692         nic->pkts_to_process = budget;
2693         org_pkts_to_process = nic->pkts_to_process;
2694
2695         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2696         readl(&bar0->rx_traffic_int);
2697
2698         for (i = 0; i < config->rx_ring_num; i++) {
2699                 rx_intr_handler(&mac_control->rings[i]);
2700                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2701                 if (!nic->pkts_to_process) {
2702                         /* Quota for the current iteration has been met */
2703                         goto no_rx;
2704                 }
2705         }
2706
2707         netif_rx_complete(dev, napi);
2708
2709         for (i = 0; i < config->rx_ring_num; i++) {
2710                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2711                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2712                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2713                         break;
2714                 }
2715         }
2716         /* Re enable the Rx interrupts. */
2717         writeq(0x0, &bar0->rx_traffic_mask);
2718         readl(&bar0->rx_traffic_mask);
2719         atomic_dec(&nic->isr_cnt);
2720         return pkt_cnt;
2721
2722 no_rx:
2723         for (i = 0; i < config->rx_ring_num; i++) {
2724                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2725                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2726                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2727                         break;
2728                 }
2729         }
2730         atomic_dec(&nic->isr_cnt);
2731         return pkt_cnt;
2732 }
2733
2734 #ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	/* Nothing can be done while the PCI channel is offline. */
	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge all pending Rx and Tx traffic interrupts. */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* Replenish the Rx rings for the frames just delivered. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
	return;
}
2787 #endif
2788
2789 /**
2790  *  rx_intr_handler - Rx interrupt handler
2791  *  @nic: device private variable.
2792  *  Description:
2793  *  If the interrupt is because of a received frame or if the
2794  *  receive ring contains fresh as yet un-processed frames,this function is
2795  *  called. It picks out the RxD at which place the last Rx processing had
2796  *  stopped and sends the skb to the OSM's Rx handler and then increments
2797  *  the offset.
2798  *  Return Value:
2799  *  NONE.
2800  */
static void rx_intr_handler(struct ring_info *ring_data)
{
        struct s2io_nic *nic = ring_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        int get_block, put_block, put_offset;
        struct rx_curr_get_info get_info, put_info;
        struct RxD_t *rxdp;
        struct sk_buff *skb;
        int pkt_cnt = 0;
        int i;
        struct RxD1* rxdp1;
        struct RxD3* rxdp3;

        /* Bail out early if the adapter is being brought down for reset. */
        spin_lock(&nic->rx_lock);
        if (atomic_read(&nic->card_state) == CARD_DOWN) {
                DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
                          __FUNCTION__, dev->name);
                spin_unlock(&nic->rx_lock);
                return;
        }

        /* Snapshot the consumer (get) and producer (put) positions. */
        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
        put_block = put_info.block_index;
        rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
        if (!napi) {
                /* put_pos is updated from another context in the non-NAPI
                 * case, so guard the read with put_lock. */
                spin_lock(&nic->put_lock);
                put_offset = ring_data->put_pos;
                spin_unlock(&nic->put_lock);
        } else
                put_offset = ring_data->put_pos;

        /* Walk the descriptors the NIC has handed back to the host. */
        while (RXD_IS_UP2DT(rxdp)) {
                /*
                 * If we are next to the put index, the ring is full:
                 * stop processing here.
                 */
                if ((get_block == put_block) &&
                    (get_info.offset + 1) == put_info.offset) {
                        DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
                        break;
                }
                skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
                if (skb == NULL) {
                        /* Descriptor without an attached skb: the ring state
                         * is corrupt, abandon processing entirely. */
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
                        spin_unlock(&nic->rx_lock);
                        return;
                }
                /* Unmap the DMA buffer(s) according to the buffer mode. */
                if (nic->rxd_mode == RXD_MODE_1) {
                        rxdp1 = (struct RxD1*)rxdp;
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                rxdp1->Buffer0_ptr,
                                dev->mtu +
                                HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE +
                                HEADER_SNAP_SIZE,
                                PCI_DMA_FROMDEVICE);
                } else if (nic->rxd_mode == RXD_MODE_3B) {
                        rxdp3 = (struct RxD3*)rxdp;
                        /* Buffer0 (header) is only synced; Buffer2 (payload)
                         * is fully unmapped before passing the skb up. */
                        pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
                                rxdp3->Buffer0_ptr,
                                BUF0_LEN, PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                rxdp3->Buffer2_ptr,
                                dev->mtu + 4,
                                PCI_DMA_FROMDEVICE);
                }
                prefetch(skb->data);
                rx_osm_handler(ring_data, rxdp);
                /* Advance the get pointer, wrapping to the next block when
                 * the current block's descriptors are exhausted. */
                get_info.offset++;
                ring_data->rx_curr_get_info.offset = get_info.offset;
                rxdp = ring_data->rx_blocks[get_block].
                                rxds[get_info.offset].virt_addr;
                if (get_info.offset == rxd_count[nic->rxd_mode]) {
                        get_info.offset = 0;
                        ring_data->rx_curr_get_info.offset = get_info.offset;
                        get_block++;
                        if (get_block == ring_data->block_count)
                                get_block = 0;
                        ring_data->rx_curr_get_info.block_index = get_block;
                        rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
                }

                nic->pkts_to_process -= 1;
                /* Under NAPI, stop once the polling budget is consumed. */
                if ((napi) && (!nic->pkts_to_process))
                        break;
                pkt_cnt++;
                /* Honour the indicate_max_pkts module parameter, if set. */
                if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
                        break;
        }
        if (nic->lro) {
                /* Clear all LRO sessions before exiting */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *lro = &nic->lro0_n[i];
                        if (lro->in_use) {
                                /* Flush any aggregated frame up the stack */
                                update_L3L4_header(nic, lro);
                                queue_rx_frame(lro->parent);
                                clear_lro_session(lro);
                        }
                }
        }

        spin_unlock(&nic->rx_lock);
}
2908
2909 /**
2910  *  tx_intr_handler - Transmit interrupt handler
2911  *  @nic : device private variable
2912  *  Description:
2913  *  If an interrupt was raised to indicate DMA complete of the
2914  *  Tx packet, this function is called. It identifies the last TxD
2915  *  whose buffer was freed and frees all skbs whose data have already
2916  *  DMA'ed into the NICs internal memory.
2917  *  Return Value:
2918  *  NONE
2919  */
2920
static void tx_intr_handler(struct fifo_info *fifo_data)
{
        struct s2io_nic *nic = fifo_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        struct tx_curr_get_info get_info, put_info;
        struct sk_buff *skb;
        struct TxD *txdlp;
        u8 err_mask;

        /* Snapshot the consumer (get) and producer (put) positions. */
        get_info = fifo_data->tx_curr_get_info;
        memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
        txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
            list_virt_addr;
        /* Reclaim every descriptor the NIC no longer owns, up to (but not
         * including) the put index. */
        while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
               (get_info.offset != put_info.offset) &&
               (txdlp->Host_Control)) {
                /* Check for TxD errors */
                if (txdlp->Control_1 & TXD_T_CODE) {
                        unsigned long long err;
                        err = txdlp->Control_1 & TXD_T_CODE;
                        /* Bit 0 of the t_code flags a parity error */
                        if (err & 0x1) {
                                nic->mac_control.stats_info->sw_stat.
                                                parity_err_cnt++;
                        }

                        /* update t_code statistics */
                        err_mask = err >> 48;
                        switch(err_mask) {
                                case 2:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_buf_abort_cnt++;
                                break;

                                case 3:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_desc_abort_cnt++;
                                break;

                                case 7:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_parity_err_cnt++;
                                break;

                                case 10:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_link_loss_cnt++;
                                break;

                                case 15:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_list_proc_err_cnt++;
                                break;
                        }
                }

                /* Unmaps the TxDL buffers and returns the owning skb. */
                skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
                if (skb == NULL) {
                        /* NOTE(review): returning here skips the queue-wake
                         * at the bottom of the function — confirm this is the
                         * intended behavior on a corrupt descriptor list. */
                        DBG_PRINT(ERR_DBG, "%s: Null skb ",
                        __FUNCTION__);
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        return;
                }

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
                nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
                dev_kfree_skb_irq(skb);

                /* Advance the get pointer, wrapping at fifo_len + 1. */
                get_info.offset++;
                if (get_info.offset == get_info.fifo_len + 1)
                        get_info.offset = 0;
                txdlp = (struct TxD *) fifo_data->list_info
                    [get_info.offset].list_virt_addr;
                fifo_data->tx_curr_get_info.offset =
                    get_info.offset;
        }

        /* Descriptors were freed, so the queue can accept packets again. */
        spin_lock(&nic->tx_lock);
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
        spin_unlock(&nic->tx_lock);
}
3003
3004 /**
3005  *  s2io_mdio_write - Function to write in to MDIO registers
3006  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3007  *  @addr     : address value
3008  *  @value    : data value
3009  *  @dev      : pointer to net_device structure
3010  *  Description:
3011  *  This function is used to write values to the MDIO registers
3012  *  NONE
3013  */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
        u64 val64 = 0x0;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Address transaction: latch the register address, then kick the
         * transaction by setting the start bits, and allow it to finish. */
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* Data transaction: write 'value' to the addressed MDIO register. */
        val64 = 0x0;
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0)
                        | MDIO_MDIO_DATA(value)
                        | MDIO_OP(MDIO_OP_WRITE_TRANS);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* Follow-up read transaction; the result register is not consumed
         * here — presumably this completes/verifies the write cycle.
         * TODO(review): confirm against the Xframe MDIO programming guide. */
        val64 = 0x0;
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
        | MDIO_MMD_DEV_ADDR(mmd_type)
        | MDIO_MMS_PRT_ADDR(0x0)
        | MDIO_OP(MDIO_OP_READ_TRANS);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

}
3052
/**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
 *  NONE
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
        u64 val64 = 0x0;
        u64 rval64 = 0x0;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Address transaction: latch the register address, then kick the
         * transaction by setting the start bits, and allow it to finish. */
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* Data transaction: issue the read for the addressed register. */
        val64 = 0x0;
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0)
                        | MDIO_OP(MDIO_OP_READ_TRANS);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* The 16-bit read result sits in bits 16..31 of mdio_control. */
        rval64 = readq(&bar0->mdio_control);
        rval64 = rval64 & 0xFFFF0000;
        rval64 = rval64 >> 16;
        return rval64;
}
3095 /**
3096  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
3098  *  @flag         : flag to indicate the status
3099  *  @type         : counter type
3100  *  Description:
3101  *  This function is to check the status of the xpak counters value
3102  *  NONE
3103  */
3104
3105 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3106 {
3107         u64 mask = 0x3;
3108         u64 val64;
3109         int i;
3110         for(i = 0; i <index; i++)
3111                 mask = mask << 0x2;
3112
3113         if(flag > 0)
3114         {
3115                 *counter = *counter + 1;
3116                 val64 = *regs_stat & mask;
3117                 val64 = val64 >> (index * 0x2);
3118                 val64 = val64 + 1;
3119                 if(val64 == 3)
3120                 {
3121                         switch(type)
3122                         {
3123                         case 1:
3124                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3125                                           "service. Excessive temperatures may "
3126                                           "result in premature transceiver "
3127                                           "failure \n");
3128                         break;
3129                         case 2:
3130                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3131                                           "service Excessive bias currents may "
3132                                           "indicate imminent laser diode "
3133                                           "failure \n");
3134                         break;
3135                         case 3:
3136                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3137                                           "service Excessive laser output "
3138                                           "power may saturate far-end "
3139                                           "receiver\n");
3140                         break;
3141                         default:
3142                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3143                                           "type \n");
3144                         }
3145                         val64 = 0x0;
3146                 }
3147                 val64 = val64 << (index * 0x2);
3148                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3149
3150         } else {
3151                 *regs_stat = *regs_stat & (~mask);
3152         }
3153 }
3154
3155 /**
3156  *  s2io_updt_xpak_counter - Function to update the xpak counters
3157  *  @dev         : pointer to net_device struct
3158  *  Description:
 *  This function is to update the status of the xpak counters value
3160  *  NONE
3161  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
        u16 flag  = 0x0;
        u16 type  = 0x0;
        u16 val16 = 0x0;
        u64 val64 = 0x0;
        u64 addr  = 0x0;

        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Check the communication with the MDIO slave */
        addr = 0x0000;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
        if((val64 == 0xFFFF) || (val64 == 0x0000))
        {
                /* All-ones/all-zeros reads indicate a dead MDIO bus */
                DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
                          "Returned %llx\n", (unsigned long long)val64);
                return;
        }

        /* Check for the expected value of 2040 at PMA address 0x0000 */
        if(val64 != 0x2040)
        {
                DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
                DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
                          (unsigned long long)val64);
                return;
        }

        /* Loading the DOM register to MDIO register */
        addr = 0xA100;
        s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Reading the Alarm flags */
        addr = 0xA070;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Alarm "high" counters are tracked through s2io_chk_xpak_counter
         * so that three consecutive alarms trigger a console warning;
         * the "low" counters are simply incremented. */
        flag = CHECKBIT(val64, 0x7);
        type = 1;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x0, flag, type);

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.alarm_transceiver_temp_low++;

        flag = CHECKBIT(val64, 0x3);
        type = 2;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x2, flag, type);

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.alarm_laser_bias_current_low++;

        flag = CHECKBIT(val64, 0x1);
        type = 3;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x4, flag, type);

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.alarm_laser_output_power_low++;

        /* Reading the Warning flags */
        addr = 0xA074;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Warning flags only bump their counters, no console output. */
        if(CHECKBIT(val64, 0x7))
                stat_info->xpak_stat.warn_transceiver_temp_high++;

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.warn_transceiver_temp_low++;

        if(CHECKBIT(val64, 0x3))
                stat_info->xpak_stat.warn_laser_bias_current_high++;

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.warn_laser_bias_current_low++;

        if(CHECKBIT(val64, 0x1))
                stat_info->xpak_stat.warn_laser_output_power_high++;

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.warn_laser_output_power_low++;
}
3253
3254 /**
 *  alarm_intr_handler - Alarm Interrupt handler
3256  *  @nic: device private variable
3257  *  Description: If the interrupt was neither because of Rx packet or Tx
3258  *  complete, this function is called. If the interrupt was to indicate
3259  *  a loss of link, the OSM link status handler is invoked for any other
3260  *  alarm interrupt the block that raised the interrupt is displayed
3261  *  and a H/W reset is issued.
3262  *  Return Value:
3263  *  NONE
3264 */
3265
static void alarm_intr_handler(struct s2io_nic *nic)
{
        struct net_device *dev = (struct net_device *) nic->dev;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 val64 = 0, err_reg = 0;
        u64 cnt;
        int i;
        /* Nothing to do if the card is down or the PCI channel is gone. */
        if (atomic_read(&nic->card_state) == CARD_DOWN)
                return;
        if (pci_channel_offline(nic->pdev))
                return;
        /* ring_full_cnt is rebuilt from the bump counters below. */
        nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
        /* Handling the XPAK counters update */
        if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
                /* waiting for an hour */
                nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
        } else {
                s2io_updt_xpak_counter(dev);
                /* reset the count to zero */
                nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
        }

        /* Handling link status change error Intr */
        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /* Write-1-to-clear: reading then writing back clears the
                 * asserted error bits. */
                err_reg = readq(&bar0->mac_rmac_err_reg);
                writeq(err_reg, &bar0->mac_rmac_err_reg);
                if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
                        schedule_work(&nic->set_link_task);
                }
        }

        /* Handling Ecc errors */
        val64 = readq(&bar0->mc_err_reg);
        writeq(val64, &bar0->mc_err_reg);
        if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
                if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
                        nic->mac_control.stats_info->sw_stat.
                                double_ecc_errs++;
                        DBG_PRINT(INIT_DBG, "%s: Device indicates ",
                                  dev->name);
                        DBG_PRINT(INIT_DBG, "double ECC error!!\n");
                        if (nic->device_type != XFRAME_II_DEVICE) {
                                /* Reset XframeI only if critical error */
                                if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
                                             MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
                                        netif_stop_queue(dev);
                                        schedule_work(&nic->rst_timer_task);
                                        nic->mac_control.stats_info->sw_stat.
                                                        soft_reset_cnt++;
                                }
                        }
                } else {
                        /* Single-bit ECC errors are correctable: count only */
                        nic->mac_control.stats_info->sw_stat.
                                single_ecc_errs++;
                }
        }

        /* In case of a serious error, the device will be Reset. */
        val64 = readq(&bar0->serr_source);
        if (val64 & SERR_SOURCE_ANY) {
                nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
                DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
                DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
                          (unsigned long long)val64);
                netif_stop_queue(dev);
                schedule_work(&nic->rst_timer_task);
                nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
        }

        /*
         * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
         * Error occurs, the adapter will be recycled by disabling the
         * adapter enable bit and enabling it again after the device
         * becomes Quiescent.
         */
        val64 = readq(&bar0->pcc_err_reg);
        writeq(val64, &bar0->pcc_err_reg);
        if (val64 & PCC_FB_ECC_DB_ERR) {
                u64 ac = readq(&bar0->adapter_control);
                ac &= ~(ADAPTER_CNTL_EN);
                writeq(ac, &bar0->adapter_control);
                /* Read back to post the write; set_link_task re-enables. */
                ac = readq(&bar0->adapter_control);
                schedule_work(&nic->set_link_task);
        }
        /* Check for data parity error */
        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                if (val64 & GPIO_INT_REG_DP_ERR_INT) {
                        nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
                        schedule_work(&nic->rst_timer_task);
                        nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
                }
        }

        /* Check for ring full counter */
        if (nic->device_type & XFRAME_II_DEVICE) {
                /* Each bump counter register packs four 16-bit counts,
                 * one per ring; accumulate them all. */
                val64 = readq(&bar0->ring_bump_counter1);
                for (i=0; i<4; i++) {
                        cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
                        cnt >>= 64 - ((i+1)*16);
                        nic->mac_control.stats_info->sw_stat.ring_full_cnt
                                += cnt;
                }

                val64 = readq(&bar0->ring_bump_counter2);
                for (i=0; i<4; i++) {
                        cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
                        cnt >>= 64 - ((i+1)*16);
                        nic->mac_control.stats_info->sw_stat.ring_full_cnt
                                += cnt;
                }
        }

        /* Other type of interrupts are not being handled now,  TODO */
}
3382
3383 /**
3384  *  wait_for_cmd_complete - waits for a command to complete.
3385  *  @sp : private member of the device structure, which is a pointer to the
3386  *  s2io_nic structure.
3387  *  Description: Function that waits for a command to Write into RMAC
3388  *  ADDR DATA registers to be completed and returns either success or
3389  *  error depending on whether the command was complete or not.
3390  *  Return value:
3391  *   SUCCESS on success and FAILURE on failure.
3392  */
3393
3394 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3395                                 int bit_state)
3396 {
3397         int ret = FAILURE, cnt = 0, delay = 1;
3398         u64 val64;
3399
3400         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3401                 return FAILURE;
3402
3403         do {
3404                 val64 = readq(addr);
3405                 if (bit_state == S2IO_BIT_RESET) {
3406                         if (!(val64 & busy_bit)) {
3407                                 ret = SUCCESS;
3408                                 break;
3409                         }
3410                 } else {
3411                         if (!(val64 & busy_bit)) {
3412                                 ret = SUCCESS;
3413                                 break;
3414                         }
3415                 }
3416
3417                 if(in_interrupt())
3418                         mdelay(delay);
3419                 else
3420                         msleep(delay);
3421
3422                 if (++cnt >= 10)
3423                         delay = 50;
3424         } while (cnt < 20);
3425         return ret;
3426 }
3427 /*
3428  * check_pci_device_id - Checks if the device id is supported
3429  * @id : device id
3430  * Description: Function to check if the pci device id is supported by driver.
3431  * Return value: Actual device id if supported else PCI_ANY_ID
3432  */
3433 static u16 check_pci_device_id(u16 id)
3434 {
3435         switch (id) {
3436         case PCI_DEVICE_ID_HERC_WIN:
3437         case PCI_DEVICE_ID_HERC_UNI:
3438                 return XFRAME_II_DEVICE;
3439         case PCI_DEVICE_ID_S2IO_UNI:
3440         case PCI_DEVICE_ID_S2IO_WIN:
3441                 return XFRAME_I_DEVICE;
3442         default:
3443                 return PCI_ANY_ID;
3444         }
3445 }
3446
3447 /**
3448  *  s2io_reset - Resets the card.
3449  *  @sp : private member of the device structure.
3450  *  Description: Function to Reset the card. This function then also
3451  *  restores the previously saved PCI configuration space registers as
3452  *  the card reset also resets the configuration space.
3453  *  Return value:
3454  *  void.
3455  */
3456
static void s2io_reset(struct s2io_nic * sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        u16 subid, pci_cmd;
        int i;
        u16 val16;
        unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
        unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

        DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
                        __FUNCTION__, sp->dev->name);

        /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

        /* Issue the software reset; CX4 cards need a longer settle time. */
        val64 = SW_RESET_ALL;
        writeq(val64, &bar0->sw_reset);
        if (strstr(sp->product_name, "CX4")) {
                msleep(750);
        }
        msleep(250);
        /* The reset wipes PCI config space; restore it and verify the
         * device id reads back as a supported part, retrying if needed. */
        for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

                /* Restore the PCI state saved during initialization. */
                pci_restore_state(sp->pdev);
                pci_read_config_word(sp->pdev, 0x2, &val16);
                if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
                        break;
                msleep(200);
        }

        if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
                DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
        }

        /* Put back the saved PCI-X command register. */
        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

        s2io_init_pci(sp);

        /* Set swapper to enable I/O register access */
        s2io_set_swapper(sp);

        /* Restore the MSIX table entries from local variables */
        restore_xmsi_data(sp);

        /* Clear certain PCI/PCI-X fields after reset */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Clear "detected parity error" bit */
                pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

                /* Clearing PCIX Ecc status register */
                pci_write_config_dword(sp->pdev, 0x68, 0x7C);

                /* Clearing PCI_STATUS error reflected here */
                writeq(BIT(62), &bar0->txpic_int_reg);
        }

        /* Reset device statistics maintained by OS */
        memset(&sp->stats, 0, sizeof (struct net_device_stats));

        up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
        down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
        up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
        down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
        reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
        mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
        mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
        watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
        /* save link up/down time/cnt, reset/memory/watchdog cnt */
        memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
        /* restore link up/down time/cnt, reset/memory/watchdog cnt */
        sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
        sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
        sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
        sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
        sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
        sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
        sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
        sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

        /* SXE-002: Configure link and activity LED to turn it off */
        subid = sp->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (sp->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        /*
         * Clear spurious ECC interrupts that would have occured on
         * XFRAME II cards after reset.
         */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = readq(&bar0->pcc_err_reg);
                writeq(val64, &bar0->pcc_err_reg);
        }

        /* restore the previously assigned mac address */
        s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

        sp->device_enabled_once = FALSE;
}
3563
3564 /**
 *  s2io_set_swapper - to set the swapper control on the card
3566  *  @sp : private member of the device structure,
3567  *  pointer to the s2io_nic structure.
3568  *  Description: Function to set the swapper control on the card
3569  *  correctly depending on the 'endianness' of the system.
3570  *  Return value:
3571  *  SUCCESS on success and FAILURE on failure.
3572  */
3573
static int s2io_set_swapper(struct s2io_nic * sp)
{
        struct net_device *dev = sp->dev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64, valt, valr;

        /*
         * Set proper endian settings and verify the same by reading
         * the PIF Feed-back register.  The feedback register returns the
         * known pattern 0x0123456789ABCDEF when the swapper is configured
         * correctly for this host's endianness.
         */

        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /*
                 * Read path is mis-swapped: try each of the four
                 * flip-enable/swap-enable combinations until the feedback
                 * pattern reads back correctly.
                 */
                int i = 0;
                u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
                                0x8100008181000081ULL,  /* FE=1, SE=0 */
                                0x4200004242000042ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq(value[i], &bar0->swapper_ctrl);
                        val64 = readq(&bar0->pif_rd_swapper_fb);
                        if (val64 == 0x0123456789ABCDEFULL)
                                break;
                        i++;
                }
                if (i == 4) {
                        /* No combination worked: give up. */
                        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                                dev->name);
                        DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                                (unsigned long long) val64);
                        return FAILURE;
                }
                /* Remember the read-path setting that worked. */
                valr = value[i];
        } else {
                /* Read path already correct; keep the current setting. */
                valr = readq(&bar0->swapper_ctrl);
        }

        /*
         * Now verify the WRITE path: write a known pattern to the
         * xmsi_address register and read it back.
         */
        valt = 0x0123456789ABCDEFULL;
        writeq(valt, &bar0->xmsi_address);
        val64 = readq(&bar0->xmsi_address);

        if(val64 != valt) {
                /*
                 * Write path is mis-swapped: try the four write-side
                 * FE/SE combinations, OR-ed with the known-good read-side
                 * bits (valr) so reads keep working during the probe.
                 */
                int i = 0;
                u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
                                0x0081810000818100ULL,  /* FE=1, SE=0 */
                                0x0042420000424200ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq((value[i] | valr), &bar0->swapper_ctrl);
                        writeq(valt, &bar0->xmsi_address);
                        val64 = readq(&bar0->xmsi_address);
                        if(val64 == valt)
                                break;
                        i++;
                }
                if(i == 4) {
                        unsigned long long x = val64;
                        DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
                        DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
                        return FAILURE;
                }
        }
        /* Keep only the top 16 bits (the verified FE/SE settings). */
        val64 = readq(&bar0->swapper_ctrl);
        val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
        /*
         * The device by default set to a big endian format, so a
         * big endian driver need not set anything.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#else
        /*
         * Initially we enable all bits to make it accessible by the
         * driver, then we selectively enable only those bits that
         * we want to set.  Little-endian hosts additionally need the
         * swap-enable (SE) bits on descriptor paths.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_R_SE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXD_W_SE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_R_SE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXD_W_SE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#endif
        val64 = readq(&bar0->swapper_ctrl);

        /*
         * Verifying if endian settings are accurate by reading a
         * feedback register.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Endian settings are incorrect, calls for another dekko. */
                DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                          dev->name);
                DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        return SUCCESS;
}
3701
3702 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3703 {
3704         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3705         u64 val64;
3706         int ret = 0, cnt = 0;
3707
3708         do {
3709                 val64 = readq(&bar0->xmsi_access);
3710                 if (!(val64 & BIT(15)))
3711                         break;
3712                 mdelay(1);
3713                 cnt++;
3714         } while(cnt < 5);
3715         if (cnt == 5) {
3716                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3717                 ret = 1;
3718         }
3719
3720         return ret;
3721 }
3722
3723 static void restore_xmsi_data(struct s2io_nic *nic)
3724 {
3725         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3726         u64 val64;
3727         int i;
3728
3729         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3730                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3731                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3732                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3733                 writeq(val64, &bar0->xmsi_access);
3734                 if (wait_for_msix_trans(nic, i)) {
3735                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3736                         continue;
3737                 }
3738         }
3739 }
3740
3741 static void store_xmsi_data(struct s2io_nic *nic)
3742 {
3743         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3744         u64 val64, addr, data;
3745         int i;
3746
3747         /* Store and display */
3748         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3749                 val64 = (BIT(15) | vBIT(i, 26, 6));
3750                 writeq(val64, &bar0->xmsi_access);
3751                 if (wait_for_msix_trans(nic, i)) {
3752                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3753                         continue;
3754                 }
3755                 addr = readq(&bar0->xmsi_address);
3756                 data = readq(&bar0->xmsi_data);
3757                 if (addr && data) {
3758                         nic->msix_info[i].addr = addr;
3759                         nic->msix_info[i].data = data;
3760                 }
3761         }
3762 }
3763
/*
 * Allocate the MSI-X bookkeeping tables, program the Tx/Rx interrupt
 * steering registers, and enable MSI-X on the PCI device.
 * Returns 0 on success, -ENOMEM on allocation or enable failure.
 * On failure all allocations are released and nic->entries /
 * nic->s2io_entries are reset to NULL.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 tx_mat, rx_mat;
        u16 msi_control; /* Temp variable */
        int ret, i, j, msix_indx = 1;   /* vector 0 is left for alarms */

        /* Table handed to pci_enable_msix(). */
        nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
                               GFP_KERNEL);
        if (nic->entries == NULL) {
                DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
                        __FUNCTION__);
                nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
                return -ENOMEM;
        }
        nic->mac_control.stats_info->sw_stat.mem_allocated 
                += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
        memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

        /* Driver-private per-vector state (handler arg, type, in-use). */
        nic->s2io_entries =
                kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
                                   GFP_KERNEL);
        if (nic->s2io_entries == NULL) {
                DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 
                        __FUNCTION__);
                nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
                kfree(nic->entries);
                nic->mac_control.stats_info->sw_stat.mem_freed 
                        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
                return -ENOMEM;
        }
         nic->mac_control.stats_info->sw_stat.mem_allocated 
                += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
        memset(nic->s2io_entries, 0,
               MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

        for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
                nic->entries[i].entry = i;
                nic->s2io_entries[i].entry = i;
                nic->s2io_entries[i].arg = NULL;
                nic->s2io_entries[i].in_use = 0;
        }

        /* Steer each Tx FIFO's interrupt to its own MSI-X vector. */
        tx_mat = readq(&bar0->tx_mat0_n[0]);
        for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
                tx_mat |= TX_MAT_SET(i, msix_indx);
                nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
                nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
                nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
        }
        writeq(tx_mat, &bar0->tx_mat0_n[0]);

        if (!nic->config.bimodal) {
                /* Non-bimodal: steer each Rx ring via the rx_mat register. */
                rx_mat = readq(&bar0->rx_mat);
                for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
                        rx_mat |= RX_MAT_SET(j, msix_indx);
                        nic->s2io_entries[msix_indx].arg 
                                = &nic->mac_control.rings[j];
                        nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
                        nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
                }
                writeq(rx_mat, &bar0->rx_mat);
        } else {
                /* Bimodal: Rx rings are steered through tx_mat0_n[7]. */
                tx_mat = readq(&bar0->tx_mat0_n[7]);
                for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
                        /*
                         * NOTE(review): TX_MAT_SET(i, ...) uses `i`, which is
                         * left at tx_fifo_num by the earlier loop, while this
                         * loop iterates with `j` — looks like it should be
                         * TX_MAT_SET(j, msix_indx); confirm against the
                         * bimodal steering spec before changing.
                         */
                        tx_mat |= TX_MAT_SET(i, msix_indx);
                        nic->s2io_entries[msix_indx].arg 
                                = &nic->mac_control.rings[j];
                        nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
                        nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
                }
                writeq(tx_mat, &bar0->tx_mat0_n[7]);
        }

        nic->avail_msix_vectors = 0;
        ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
        /* We fail init if error or we get less vectors than min required */
        if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
                /* Positive return = vectors available; retry with that many. */
                nic->avail_msix_vectors = ret;
                ret = pci_enable_msix(nic->pdev, nic->entries, ret);
        }
        if (ret) {
                DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
                kfree(nic->entries);
                nic->mac_control.stats_info->sw_stat.mem_freed 
                        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
                kfree(nic->s2io_entries);
                nic->mac_control.stats_info->sw_stat.mem_freed 
                += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
                nic->entries = NULL;
                nic->s2io_entries = NULL;
                nic->avail_msix_vectors = 0;
                return -ENOMEM;
        }
        if (!nic->avail_msix_vectors)
                nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

        /*
         * To enable MSI-X, MSI also needs to be enabled, due to a bug
         * in the herc NIC. (Temp change, needs to be removed later)
         */
        pci_read_config_word(nic->pdev, 0x42, &msi_control);
        msi_control |= 0x1; /* Enable MSI */
        pci_write_config_word(nic->pdev, 0x42, msi_control);

        return 0;
}
3871
3872 /* Handle software interrupt used during MSI(X) test */
3873 static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3874 {
3875         struct s2io_nic *sp = dev_id;
3876
3877         sp->msi_detected = 1;
3878         wake_up(&sp->msi_wait);
3879
3880         return IRQ_HANDLED;
3881 }
3882
/* Test the interrupt path by forcing a software IRQ */
static int __devinit s2io_test_msi(struct s2io_nic *sp)
{
        struct pci_dev *pdev = sp->pdev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int err;
        u64 val64, saved64;

        /* Temporarily hook the test handler onto MSI-X vector 1. */
        err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
                        sp->name, sp);
        if (err) {
                DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
                       sp->dev->name, pci_name(pdev), pdev->irq);
                return err;
        }

        init_waitqueue_head (&sp->msi_wait);
        sp->msi_detected = 0;

        /*
         * Arm the scheduled-interrupt timer for a one-shot interrupt
         * routed to MSI vector 1; s2io_test_intr() sets msi_detected
         * and wakes us when it arrives.
         */
        saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
        val64 |= SCHED_INT_CTRL_ONE_SHOT;
        val64 |= SCHED_INT_CTRL_TIMER_EN;
        val64 |= SCHED_INT_CTRL_INT2MSI(1);
        writeq(val64, &bar0->scheduled_int_ctrl);

        /* Wait up to 100ms for the test interrupt to be delivered. */
        wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

        if (!sp->msi_detected) {
                /* MSI(X) test failed, go back to INTx mode */
                DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
                        "using MSI(X) during test\n", sp->dev->name,
                        pci_name(pdev));

                err = -EOPNOTSUPP;
        }

        free_irq(sp->entries[1].vector, sp);

        /* Restore the scheduled-interrupt control register. */
        writeq(saved64, &bar0->scheduled_int_ctrl);

        return err;
}
3925 /* ********************************************************* *
3926  * Functions defined below concern the OS part of the driver *
3927  * ********************************************************* */
3928
3929 /**
3930  *  s2io_open - open entry point of the driver
3931  *  @dev : pointer to the device structure.
3932  *  Description:
3933  *  This function is the open entry point of the driver. It mainly calls a
3934  *  function to allocate Rx buffers and inserts them into the buffer
3935  *  descriptors and then enables the Rx part of the NIC.
3936  *  Return value:
3937  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3938  *   file on failure.
3939  */
3940
static int s2io_open(struct net_device *dev)
{
        struct s2io_nic *sp = dev->priv;
        int err = 0;

        /*
         * Make sure you have link off by default every time
         * Nic is initialized
         */
        netif_carrier_off(dev);
        sp->last_link_state = 0;

        napi_enable(&sp->napi);

        /*
         * If MSI-X was requested, enable it and run the software
         * interrupt test; then tear it back down either way (the ISR
         * registration path re-enables it later).  On any failure fall
         * back to legacy INTA.
         */
        if (sp->intr_type == MSI_X) {
                int ret = s2io_enable_msi_x(sp);

                if (!ret) {
                        u16 msi_control;

                        ret = s2io_test_msi(sp);

                        /* rollback MSI-X, will re-enable during add_isr() */
                        kfree(sp->entries);
                        sp->mac_control.stats_info->sw_stat.mem_freed +=
                                (MAX_REQUESTED_MSI_X *
                                sizeof(struct msix_entry));
                        kfree(sp->s2io_entries);
                        sp->mac_control.stats_info->sw_stat.mem_freed +=
                                (MAX_REQUESTED_MSI_X *
                                sizeof(struct s2io_msix_entry));
                        sp->entries = NULL;
                        sp->s2io_entries = NULL;

                        /* Undo the MSI enable done in s2io_enable_msi_x(). */
                        pci_read_config_word(sp->pdev, 0x42, &msi_control);
                        msi_control &= 0xFFFE; /* Disable MSI */
                        pci_write_config_word(sp->pdev, 0x42, msi_control);

                        pci_disable_msix(sp->pdev);

                }
                if (ret) {

                        DBG_PRINT(ERR_DBG,
                          "%s: MSI-X requested but failed to enable\n",
                          dev->name);
                        sp->intr_type = INTA;
                }
        }

        /* NAPI doesn't work well with MSI(X) */
         if (sp->intr_type != INTA) {
                if(sp->config.napi)
                        sp->config.napi = 0;
        }

        /* Initialize H/W and enable interrupts */
        err = s2io_card_up(sp);
        if (err) {
                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
                          dev->name);
                goto hw_init_failed;
        }

        if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
                DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
                s2io_card_down(sp);
                err = -ENODEV;
                goto hw_init_failed;
        }

        netif_start_queue(dev);
        return 0;

hw_init_failed:
        /* Undo napi_enable() and release any MSI-X tables still held. */
        napi_disable(&sp->napi);
        if (sp->intr_type == MSI_X) {
                if (sp->entries) {
                        kfree(sp->entries);
                        sp->mac_control.stats_info->sw_stat.mem_freed 
                        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
                }
                if (sp->s2io_entries) {
                        kfree(sp->s2io_entries);
                        sp->mac_control.stats_info->sw_stat.mem_freed 
                        += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
                }
        }
        return err;
}
4031
4032 /**
4033  *  s2io_close -close entry point of the driver
4034  *  @dev : device pointer.
4035  *  Description:
4036  *  This is the stop entry point of the driver. It needs to undo exactly
4037  *  whatever was done by the open entry point,thus it's usually referred to
4038  *  as the close function.Among other things this function mainly stops the
4039  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4040  *  Return value:
4041  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4042  *  file on failure.
4043  */
4044
4045 static int s2io_close(struct net_device *dev)
4046 {
4047         struct s2io_nic *sp = dev->priv;
4048
4049         netif_stop_queue(dev);
4050         napi_disable(&sp->napi);
4051         /* Reset card, kill tasklet and free Tx and Rx buffers. */
4052         s2io_card_down(sp);
4053
4054         return 0;
4055 }
4056
/**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device cannot queue the pkt, just the trans_start
 *  variable will not be updated.
 *  Return value:
 *  0 on success & 1 on failure.
 */
4069
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct s2io_nic *sp = dev->priv;
        u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
        register u64 val64;
        struct TxD *txdp;
        struct TxFIFO_element __iomem *tx_fifo;
        unsigned long flags;
        u16 vlan_tag = 0;
        int vlan_priority = 0;
        struct mac_info *mac_control;
        struct config_param *config;
        int offload_type;
        struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

        mac_control = &sp->mac_control;
        config = &sp->config;

        DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

        /* Drop empty skbs outright. */
        if (unlikely(skb->len <= 0)) {
                DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
                dev_kfree_skb_any(skb);
                return 0;
}

        spin_lock_irqsave(&sp->tx_lock, flags);
        /* Silently drop if the card is being reset/brought down. */
        if (atomic_read(&sp->card_state) == CARD_DOWN) {
                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
                          dev->name);
                spin_unlock_irqrestore(&sp->tx_lock, flags);
                dev_kfree_skb(skb);
                return 0;
        }

        queue = 0;
        /* Get Fifo number to Transmit based on vlan priority */
        if (sp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_priority = vlan_tag >> 13;
                queue = config->fifo_mapping[vlan_priority];
        }

        /* Grab the descriptor at the current "put" position of the fifo. */
        put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
        get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
        txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
                list_virt_addr;

        queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
        /* Avoid "put" pointer going beyond "get" pointer */
        if (txdp->Host_Control ||
                   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
                netif_stop_queue(dev);
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&sp->tx_lock, flags);
                return 0;
        }

        /* Set offload (LSO/CSO/VLAN) control bits in the descriptor. */
        offload_type = s2io_offload_type(skb);
        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                txdp->Control_1 |= TXD_TCP_LSO_EN;
                txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
        }
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txdp->Control_2 |=
                    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
                     TXD_TX_CKO_UDP_EN);
        }
        txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
        txdp->Control_1 |= TXD_LIST_OWN_XENA;
        txdp->Control_2 |= config->tx_intr_type;

        if (sp->vlgrp && vlan_tx_tag_present(skb)) {
                txdp->Control_2 |= TXD_VLAN_ENABLE;
                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
        }

        frg_len = skb->len - skb->data_len;
        if (offload_type == SKB_GSO_UDP) {
                /* UFO: descriptor 0 carries an 8-byte in-band header. */
                int ufo_size;

                ufo_size = s2io_udp_mss(skb);
                ufo_size &= ~7;
                txdp->Control_1 |= TXD_UFO_EN;
                txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
                txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
                sp->ufo_in_band_v[put_off] =
                                (u64)skb_shinfo(skb)->ip6_frag_id;
#else
                sp->ufo_in_band_v[put_off] =
                                (u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
                txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                        sp->ufo_in_band_v,
                                        sizeof(u64), PCI_DMA_TODEVICE);
                if((txdp->Buffer_Pointer == 0) ||
                        (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                        goto pci_map_failed;
                txdp++;
        }

        /* Map the linear part of the skb. */
        txdp->Buffer_Pointer = pci_map_single
            (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
        if((txdp->Buffer_Pointer == 0) ||
                (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                goto pci_map_failed;

        txdp->Host_Control = (unsigned long) skb;
        txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
        if (offload_type == SKB_GSO_UDP)
                txdp->Control_1 |= TXD_UFO_EN;

        frg_cnt = skb_shinfo(skb)->nr_frags;
        /* For fragmented SKB. */
        for (i = 0; i < frg_cnt; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                /* A '0' length fragment will be ignored */
                if (!frag->size)
                        continue;
                txdp++;
                /*
                 * NOTE(review): unlike the two pci_map_single() calls
                 * above, this mapping result is not checked against
                 * DMA_ERROR_CODE — a failed page mapping goes undetected.
                 */
                txdp->Buffer_Pointer = (u64) pci_map_page
                    (sp->pdev, frag->page, frag->page_offset,
                     frag->size, PCI_DMA_TODEVICE);
                txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
                if (offload_type == SKB_GSO_UDP)
                        txdp->Control_1 |= TXD_UFO_EN;
        }
        txdp->Control_1 |= TXD_GATHER_CODE_LAST;

        if (offload_type == SKB_GSO_UDP)
                frg_cnt++; /* as Txd0 was used for inband header */

        /* Hand the descriptor list to the hardware fifo. */
        tx_fifo = mac_control->tx_FIFO_start[queue];
        val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
        writeq(val64, &tx_fifo->TxDL_Pointer);

        val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
                 TX_FIFO_LAST_LIST);
        if (offload_type)
                val64 |= TX_FIFO_SPECIAL_FUNC;

        writeq(val64, &tx_fifo->List_Control);

        /* Order the MMIO writes above before any subsequent I/O. */
        mmiowb();

        /* Advance the "put" pointer, wrapping at fifo length. */
        put_off++;
        if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
                put_off = 0;
        mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

        /* Avoid "put" pointer going beyond "get" pointer */
        if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
                DBG_PRINT(TX_DBG,
                          "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
                          put_off, get_off);
                netif_stop_queue(dev);
        }
        mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        return 0;
pci_map_failed:
        /* DMA mapping failed: account, stop the queue and drop the skb. */
        stats->pci_map_fail_cnt++;
        netif_stop_queue(dev);
        stats->mem_freed += skb->truesize;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&sp->tx_lock, flags);
        return 0;
}
4244
4245 static void
4246 s2io_alarm_handle(unsigned long data)
4247 {
4248         struct s2io_nic *sp = (struct s2io_nic *)data;
4249
4250         alarm_intr_handler(sp);
4251         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4252 }
4253
/*
 * Replenish Rx buffers for ring @rng_n after Rx processing.
 * Non-LRO: refill inline only when the buffer level is critical (PANIC)
 * and the refill tasklet isn't already running; at LOW level just
 * schedule the tasklet.  LRO: always refill inline.
 * Returns 0, or -1 if an inline refill ran out of memory.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
        int rxb_size, level;

        if (!sp->lro) {
                rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
                level = rx_buffer_level(sp, rxb_size, rng_n);

                if ((level == PANIC) && (!TASKLET_IN_USE)) {
                        int ret;
                        DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
                        DBG_PRINT(INTR_DBG, "PANIC levels\n");
                        if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
                                DBG_PRINT(INFO_DBG, "Out of memory in %s",
                                          __FUNCTION__);
                                /* release the bit TASKLET_IN_USE acquired
                                 * (presumably via test_and_set — confirm) */
                                clear_bit(0, (&sp->tasklet_status));
                                return -1;
                        }
                        clear_bit(0, (&sp->tasklet_status));
                } else if (level == LOW)
                        tasklet_schedule(&sp->task);

        } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
        }
        return 0;
}
4282
4283 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4284 {
4285         struct ring_info *ring = (struct ring_info *)dev_id;
4286         struct s2io_nic *sp = ring->nic;
4287
4288         atomic_inc(&sp->isr_cnt);
4289
4290         rx_intr_handler(ring);
4291         s2io_chk_rx_buffers(sp, ring->ring_no);
4292
4293         atomic_dec(&sp->isr_cnt);
4294         return IRQ_HANDLED;
4295 }
4296
4297 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4298 {
4299         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4300         struct s2io_nic *sp = fifo->nic;
4301
4302         atomic_inc(&sp->isr_cnt);
4303         tx_intr_handler(fifo);
4304         atomic_dec(&sp->isr_cnt);
4305         return IRQ_HANDLED;
4306 }
/*
 * Handles PIC/GPIO interrupts: reacts to link up/down transitions reported
 * through the adapter's GPIO interrupt register, and flips the LED and the
 * GPIO interrupt masks to match the new link state.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;

        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
                    (val64 & GPIO_INT_REG_LINK_UP)) {
                        /*
                         * This is unstable state so clear both up/down
                         * interrupt and adapter to re-evaluate the link state.
                         */
                        val64 |=  GPIO_INT_REG_LINK_DOWN;
                        val64 |= GPIO_INT_REG_LINK_UP;
                        writeq(val64, &bar0->gpio_int_reg);
                        /* Unmask both link interrupts so the next clean
                         * transition is reported. */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~(GPIO_INT_MASK_LINK_UP |
                                   GPIO_INT_MASK_LINK_DOWN);
                        writeq(val64, &bar0->gpio_int_mask);
                }
                else if (val64 & GPIO_INT_REG_LINK_UP) {
                        val64 = readq(&bar0->adapter_status);
                                /* Enable Adapter */
                        val64 = readq(&bar0->adapter_control);
                        val64 |= ADAPTER_CNTL_EN;
                        writeq(val64, &bar0->adapter_control);
                        /* Second write turns the adapter LED on. */
                        val64 |= ADAPTER_LED_ON;
                        writeq(val64, &bar0->adapter_control);
                        if (!sp->device_enabled_once)
                                sp->device_enabled_once = 1;

                        s2io_link(sp, LINK_UP);
                        /*
                         * unmask link down interrupt and mask link-up
                         * intr
                         */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_DOWN;
                        val64 |= GPIO_INT_MASK_LINK_UP;
                        writeq(val64, &bar0->gpio_int_mask);

                }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
                        val64 = readq(&bar0->adapter_status);
                        s2io_link(sp, LINK_DOWN);
                        /* Link is down so unmask link up interrupt */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_UP;
                        val64 |= GPIO_INT_MASK_LINK_DOWN;
                        writeq(val64, &bar0->gpio_int_mask);

                        /* turn off LED */
                        val64 = readq(&bar0->adapter_control);
                        val64 = val64 &(~ADAPTER_LED_ON);
                        writeq(val64, &bar0->adapter_control);
                }
        }
        /* Final read-back; presumably flushes the mask writes over PCI —
         * value is intentionally discarded. TODO confirm. */
        val64 = readq(&bar0->gpio_int_mask);
}
4367
4368 /**
4369  *  s2io_isr - ISR handler of the device .
4370  *  @irq: the irq of the device.
4371  *  @dev_id: a void pointer to the dev structure of the NIC.
4372  *  Description:  This function is the ISR handler of the device. It
4373  *  identifies the reason for the interrupt and calls the relevant
4374  *  service routines. As a contongency measure, this ISR allocates the
4375  *  recv buffers, if their numbers are below the panic value which is
4376  *  presently set to 25% of the original number of rcv buffers allocated.
4377  *  Return value:
4378  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4379  *   IRQ_NONE: will be returned if interrupt is not from our device
4380  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int i;
        u64 reason = 0;
        struct mac_info *mac_control;
        struct config_param *config;

        /* Pretend we handled any irq's from a disconnected card */
        if (pci_channel_offline(sp->pdev))
                return IRQ_NONE;

        atomic_inc(&sp->isr_cnt);
        mac_control = &sp->mac_control;
        config = &sp->config;

        /*
         * Identify the cause for interrupt and call the appropriate
         * interrupt handler. Causes for the interrupt could be;
         * 1. Rx of packet.
         * 2. Tx complete.
         * 3. Link down.
         * 4. Error in any functional blocks of the NIC.
         */
        reason = readq(&bar0->general_int_status);

        if (!reason) {
                /* The interrupt was not raised by us. */
                atomic_dec(&sp->isr_cnt);
                return IRQ_NONE;
        }
        else if (unlikely(reason == S2IO_MINUS_ONE) ) {
                /* All-ones read: presumably the device is gone/offline —
                 * disable device and get out. TODO confirm semantics. */
                atomic_dec(&sp->isr_cnt);
                return IRQ_NONE;
        }

        if (napi) {
                /* NAPI path: mask Rx interrupts and let the poll routine
                 * do the actual Rx processing. */
                if (reason & GEN_INTR_RXTRAFFIC) {
                        if (likely (netif_rx_schedule_prep(dev, &sp->napi))) {
                                __netif_rx_schedule(dev, &sp->napi);
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
                        }
                        else
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
                }
        } else {
                /*
                 * Rx handler is called by default, without checking for the
                 * cause of interrupt.
                 * rx_traffic_int reg is an R1 register, writing all 1's
                 * will ensure that the actual interrupt causing bit get's
                 * cleared and hence a read can be avoided.
                 */
                if (reason & GEN_INTR_RXTRAFFIC)
                        writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

                for (i = 0; i < config->rx_ring_num; i++) {
                        rx_intr_handler(&mac_control->rings[i]);
                }
        }

        /*
         * tx_traffic_int reg is an R1 register, writing all 1's
         * will ensure that the actual interrupt causing bit get's
         * cleared and hence a read can be avoided.
         */
        if (reason & GEN_INTR_TXTRAFFIC)
                writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

        for (i = 0; i < config->tx_fifo_num; i++)
                tx_intr_handler(&mac_control->fifos[i]);

        if (reason & GEN_INTR_TXPIC)
                s2io_txpic_intr_handle(sp);
        /*
         * If the Rx buffer count is below the panic threshold then
         * reallocate the buffers from the interrupt handler itself,
         * else schedule a tasklet to reallocate the buffers.
         */
        if (!napi) {
                for (i = 0; i < config->rx_ring_num; i++)
                        s2io_chk_rx_buffers(sp, i);
        }

        /* Re-enable all interrupts; the read-back presumably flushes the
         * posted PCI write — TODO confirm. */
        writeq(0, &bar0->general_int_mask);
        readl(&bar0->general_int_status);

        atomic_dec(&sp->isr_cnt);
        return IRQ_HANDLED;
}
4474
4475 /**
4476  * s2io_updt_stats -
4477  */
4478 static void s2io_updt_stats(struct s2io_nic *sp)
4479 {
4480         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4481         u64 val64;
4482         int cnt = 0;
4483
4484         if (atomic_read(&sp->card_state) == CARD_UP) {
4485                 /* Apprx 30us on a 133 MHz bus */
4486                 val64 = SET_UPDT_CLICKS(10) |
4487                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4488                 writeq(val64, &bar0->stat_cfg);
4489                 do {
4490                         udelay(100);
4491                         val64 = readq(&bar0->stat_cfg);
4492                         if (!(val64 & BIT(0)))
4493                                 break;
4494                         cnt++;
4495                         if (cnt == 5)
4496                                 break; /* Updt failed */
4497                 } while(1);
4498         } 
4499 }
4500
4501 /**
4502  *  s2io_get_stats - Updates the device statistics structure.
4503  *  @dev : pointer to the device structure.
4504  *  Description:
4505  *  This function updates the device statistics structure in the s2io_nic
4506  *  structure and returns a pointer to the same.
4507  *  Return value:
4508  *  pointer to the updated net_device_stats structure.
4509  */
4510
4511 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4512 {
4513         struct s2io_nic *sp = dev->priv;
4514         struct mac_info *mac_control;
4515         struct config_param *config;
4516
4517
4518         mac_control = &sp->mac_control;
4519         config = &sp->config;
4520
4521         /* Configure Stats for immediate updt */
4522         s2io_updt_stats(sp);
4523
4524         sp->stats.tx_packets =
4525                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4526         sp->stats.tx_errors =
4527                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4528         sp->stats.rx_errors =
4529                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4530         sp->stats.multicast =
4531                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4532         sp->stats.rx_length_errors =
4533                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4534
4535         return (&sp->stats);
4536 }
4537
4538 /**
4539  *  s2io_set_multicast - entry point for multicast address enable/disable.
4540  *  @dev : pointer to the device structure
4541  *  Description:
4542  *  This function is a driver entry point which gets called by the kernel
4543  *  whenever multicast addresses must be enabled/disabled. This also gets
4544  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4545  *  determine, if multicast address must be enabled or if promiscuous mode
4546  *  is to be disabled etc.
4547  *  Return value:
4548  *  void.
4549  */
4550
static void s2io_set_multicast(struct net_device *dev)
{
        int i, j, prev_cnt;
        struct dev_mc_list *mclist;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
            0xfeffffffffffULL;
        u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
        void __iomem *add;

        if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
                /*  Enable all Multicast addresses */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 1;
                sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
        /* NOTE(review): this branch fires while IFF_ALLMULTI is still set;
         * a disable path would normally be guarded by
         * !(dev->flags & IFF_ALLMULTI) — verify against upstream. */
        } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
                /*  Disable all Multicast addresses */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 0;
                sp->all_multi_pos = 0;
        }

        if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
                /*  Put the NIC into promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 |= MAC_CFG_RMAC_PROM_ENABLE;

                /* mac_cfg is key-protected: each 32-bit half-write must be
                 * preceded by writing the unlock key. */
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                if (vlan_tag_strip != 1) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 0;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 1;
                DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
                          dev->name);
        } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
                /*  Remove the NIC from promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                if (vlan_tag_strip != 0) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 1;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 0;
                DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
                          dev->name);
        }

        /*  Update individual M_CAST address list */
        if ((!sp->m_cast_flg) && dev->mc_count) {
                if (dev->mc_count >
                    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
                        DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "can be added, please enable ");
                        DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
                        return;
                }

                prev_cnt = sp->mc_addr_count;
                sp->mc_addr_count = dev->mc_count;

                /* Clear out the previous list of Mc in the H/W. */
                for (i = 0; i < prev_cnt; i++) {
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (MAC_MC_ADDR_START_OFFSET + i);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait till command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }

                /* Create the new Rx filter list and update the same in H/W. */
                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
                               ETH_ALEN);
                        mac_addr = 0;
                        /* Pack the 6 address bytes MSB-first into a u64. */
                        for (j = 0; j < ETH_ALEN; j++) {
                                mac_addr |= mclist->dmi_addr[j];
                                mac_addr <<= 8;
                        }
                        mac_addr >>= 8;
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (i + MAC_MC_ADDR_START_OFFSET);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait till command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }
        }
}
4714
4715 /**
4716  *  s2io_set_mac_addr - Programs the Xframe mac address
4717  *  @dev : pointer to the device structure.
4718  *  @addr: a uchar pointer to the new mac address which is to be set.
4719  *  Description : This procedure will program the Xframe to receive
4720  *  frames with new Mac Address
4721  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4722  *  as defined in errno.h file on failure.
4723  */
4724
4725 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4726 {
4727         struct s2io_nic *sp = dev->priv;
4728         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4729         register u64 val64, mac_addr = 0;
4730         int i;
4731         u64 old_mac_addr = 0;
4732
4733         /*
4734          * Set the new MAC address as the new unicast filter and reflect this
4735          * change on the device address registered with the OS. It will be
4736          * at offset 0.
4737          */
4738         for (i = 0; i < ETH_ALEN; i++) {
4739                 mac_addr <<= 8;
4740                 mac_addr |= addr[i];
4741                 old_mac_addr <<= 8;
4742                 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4743         }
4744
4745         if(0 == mac_addr)
4746                 return SUCCESS;
4747
4748         /* Update the internal structure with this new mac address */
4749         if(mac_addr != old_mac_addr) {
4750                 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4751                 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4752                 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4753                 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4754                 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4755                 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4756                 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4757         }
4758
4759         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4760                &bar0->rmac_addr_data0_mem);
4761
4762         val64 =
4763             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4764             RMAC_ADDR_CMD_MEM_OFFSET(0);
4765         writeq(val64, &bar0->rmac_addr_cmd_mem);
4766         /* Wait till command completes */
4767         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4768                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4769                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4770                 return FAILURE;
4771         }
4772
4773         return SUCCESS;
4774 }
4775
4776 /**
4777  * s2io_ethtool_sset - Sets different link parameters.
4778  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4779  * @info: pointer to the structure with parameters given by ethtool to set
4780  * link information.
4781  * Description:
4782  * The function sets different link parameters provided by the user onto
4783  * the NIC.
4784  * Return value:
4785  * 0 on success.
4786 */
4787
4788 static int s2io_ethtool_sset(struct net_device *dev,
4789                              struct ethtool_cmd *info)
4790 {
4791         struct s2io_nic *sp = dev->priv;
4792         if ((info->autoneg == AUTONEG_ENABLE) ||
4793             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4794                 return -EINVAL;
4795         else {
4796                 s2io_close(sp->dev);
4797                 s2io_open(sp->dev);
4798         }
4799
4800         return 0;
4801 }
4802
4803 /**
4804  * s2io_ethtol_gset - Return link specific information.
4805  * @sp : private member of the device structure, pointer to the
4806  *      s2io_nic structure.
4807  * @info : pointer to the structure with parameters given by ethtool
4808  * to return link information.
4809  * Description:
4810  * Returns link specific information like speed, duplex etc.. to ethtool.
4811  * Return value :
4812  * return 0 on success.
4813  */
4814
static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
{
        struct s2io_nic *sp = dev->priv;
        /* Fixed-capability 10GbE fibre device. */
        info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
        /* NOTE(review): advertising is filled with SUPPORTED_* flags; the
         * ADVERTISED_* constants are the intended ones (numerically equal
         * for these bits) — confirm against include/linux/ethtool.h. */
        info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
        info->port = PORT_FIBRE;
        /* info->transceiver?? TODO */

        if (netif_carrier_ok(sp->dev)) {
                info->speed = 10000;
                info->duplex = DUPLEX_FULL;
        } else {
                /* Link down: speed and duplex are reported as unknown. */
                info->speed = -1;
                info->duplex = -1;
        }

        info->autoneg = AUTONEG_DISABLE;
        return 0;
}
4834
4835 /**
4836  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4837  * @sp : private member of the device structure, which is a pointer to the
4838  * s2io_nic structure.
4839  * @info : pointer to the structure with parameters given by ethtool to
4840  * return driver information.
4841  * Description:
4842  * Returns driver specefic information like name, version etc.. to ethtool.
4843  * Return value:
4844  *  void
4845  */
4846
static void s2io_ethtool_gdrvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *info)
{
        struct s2io_nic *sp = dev->priv;

        /* NOTE(review): strncpy does not NUL-terminate when the source
         * fills the destination buffer — verify the sources are always
         * shorter than the ethtool_drvinfo fields. */
        strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
        strncpy(info->version, s2io_driver_version, sizeof(info->version));
        strncpy(info->fw_version, "", sizeof(info->fw_version));
        strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
        info->regdump_len = XENA_REG_SPACE;
        info->eedump_len = XENA_EEPROM_SPACE;
        info->testinfo_len = S2IO_TEST_LEN;

        /* The statistics count differs between Xframe I and II devices. */
        if (sp->device_type == XFRAME_I_DEVICE)
                info->n_stats = XFRAME_I_STAT_LEN;
        else
                info->n_stats = XFRAME_II_STAT_LEN;
}
4865
4866 /**
4867  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
4868  *  @sp: private member of the device structure, which is a pointer to the
4869  *  s2io_nic structure.
4870  *  @regs : pointer to the structure with parameters given by ethtool for
4871  *  dumping the registers.
4872  *  @reg_space: The input argumnet into which all the registers are dumped.
4873  *  Description:
4874  *  Dumps the entire register space of xFrame NIC into the user given
4875  *  buffer area.
4876  * Return value :
4877  * void .
4878 */
4879
4880 static void s2io_ethtool_gregs(struct net_device *dev,
4881                                struct ethtool_regs *regs, void *space)
4882 {
4883         int i;
4884         u64 reg;
4885         u8 *reg_space = (u8 *) space;
4886         struct s2io_nic *sp = dev->priv;
4887
4888         regs->len = XENA_REG_SPACE;
4889         regs->version = sp->pdev->subsystem_device;
4890
4891         for (i = 0; i < regs->len; i += 8) {
4892                 reg = readq(sp->bar0 + i);
4893                 memcpy((reg_space + i), &reg, 8);
4894         }
4895 }
4896
4897 /**
4898  *  s2io_phy_id  - timer function that alternates adapter LED.
4899  *  @data : address of the private member of the device structure, which
4900  *  is a pointer to the s2io_nic structure, provided as an u32.
4901  * Description: This is actually the timer function that alternates the
4902  * adapter LED bit of the adapter control bit to set/reset every time on
4903  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
4904  *  once every second.
4905 */
4906 static void s2io_phy_id(unsigned long data)
4907 {
4908         struct s2io_nic *sp = (struct s2io_nic *) data;
4909         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4910         u64 val64 = 0;
4911         u16 subid;
4912
4913         subid = sp->pdev->subsystem_device;
4914         if ((sp->device_type == XFRAME_II_DEVICE) ||
4915                    ((subid & 0xFF) >= 0x07)) {
4916                 val64 = readq(&bar0->gpio_control);
4917                 val64 ^= GPIO_CTRL_GPIO_0;
4918                 writeq(val64, &bar0->gpio_control);
4919         } else {
4920                 val64 = readq(&bar0->adapter_control);
4921                 val64 ^= ADAPTER_LED_ON;
4922                 writeq(val64, &bar0->adapter_control);
4923         }
4924
4925         mod_timer(&sp->id_timer, jiffies + HZ / 2);
4926 }
4927
4928 /**
4929  * s2io_ethtool_idnic - To physically identify the nic on the system.
4930  * @sp : private member of the device structure, which is a pointer to the
4931  * s2io_nic structure.
4932  * @id : pointer to the structure with identification parameters given by
4933  * ethtool.
4934  * Description: Used to physically identify the NIC on the system.
4935  * The Link LED will blink for a time specified by the user for
4936  * identification.
4937  * NOTE: The Link has to be Up to be able to blink the LED. Hence
4938  * identification is possible only if it's link is up.
4939  * Return value:
4940  * int , returns 0 on success
4941  */
4942
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
        u64 val64 = 0, last_gpio_ctrl_val;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u16 subid;

        subid = sp->pdev->subsystem_device;
        /* Save the GPIO state so it can be restored after blinking. */
        last_gpio_ctrl_val = readq(&bar0->gpio_control);
        /* Older Xframe I boards drive the LED via adapter_control and need
         * the adapter enabled before the LED can blink. */
        if ((sp->device_type == XFRAME_I_DEVICE) &&
                ((subid & 0xFF) < 0x07)) {
                val64 = readq(&bar0->adapter_control);
                if (!(val64 & ADAPTER_CNTL_EN)) {
                        printk(KERN_ERR
                               "Adapter Link down, cannot blink LED\n");
                        return -EFAULT;
                }
        }
        /* Lazily set up the blink timer on first use. */
        if (sp->id_timer.function == NULL) {
                init_timer(&sp->id_timer);
                sp->id_timer.function = s2io_phy_id;
                sp->id_timer.data = (unsigned long) sp;
        }
        mod_timer(&sp->id_timer, jiffies);
        /* Sleep while the timer blinks the LED.
         * NOTE(review): msleep_interruptible takes milliseconds, so
         * data * HZ equals data seconds only when HZ == 1000 — confirm. */
        if (data)
                msleep_interruptible(data * HZ);
        else
                msleep_interruptible(MAX_FLICKER_TIME);
        del_timer_sync(&sp->id_timer);

        /* Restore the saved GPIO state on affected boards. */
        if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
                writeq(last_gpio_ctrl_val, &bar0->gpio_control);
                last_gpio_ctrl_val = readq(&bar0->gpio_control);
        }

        return 0;
}
4980
4981 static void s2io_ethtool_gringparam(struct net_device *dev,
4982                                     struct ethtool_ringparam *ering)
4983 {
4984         struct s2io_nic *sp = dev->priv;
4985         int i,tx_desc_count=0,rx_desc_count=0;
4986
4987         if (sp->rxd_mode == RXD_MODE_1)
4988                 ering->rx_max_pending = MAX_RX_DESC_1;
4989         else if (sp->rxd_mode == RXD_MODE_3B)
4990                 ering->rx_max_pending = MAX_RX_DESC_2;
4991
4992         ering->tx_max_pending = MAX_TX_DESC;
4993         for (i = 0 ; i < sp->config.tx_fifo_num ; i++) 
4994                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4995         
4996         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4997         ering->tx_pending = tx_desc_count;
4998         rx_desc_count = 0;
4999         for (i = 0 ; i < sp->config.rx_ring_num ; i++) 
5000                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5001
5002         ering->rx_pending = rx_desc_count;
5003
5004         ering->rx_mini_max_pending = 0;
5005         ering->rx_mini_pending = 0;
5006         if(sp->rxd_mode == RXD_MODE_1)
5007                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5008         else if (sp->rxd_mode == RXD_MODE_3B)
5009                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5010         ering->rx_jumbo_pending = rx_desc_count;
5011 }
5012
5013 /**
5014  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5015  * @sp : private member of the device structure, which is a pointer to the
5016  *      s2io_nic structure.
5017  * @ep : pointer to the structure with pause parameters given by ethtool.
5018  * Description:
5019  * Returns the Pause frame generation and reception capability of the NIC.
5020  * Return value:
5021  *  void
5022  */
5023 static void s2io_ethtool_getpause_data(struct net_device *dev,
5024                                        struct ethtool_pauseparam *ep)
5025 {
5026         u64 val64;
5027         struct s2io_nic *sp = dev->priv;
5028         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5029
5030         val64 = readq(&bar0->rmac_pause_cfg);
5031         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5032                 ep->tx_pause = TRUE;
5033         if (val64 & RMAC_PAUSE_RX_ENABLE)
5034                 ep->rx_pause = TRUE;
5035         ep->autoneg = FALSE;
5036 }
5037
5038 /**
5039  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5040  * @sp : private member of the device structure, which is a pointer to the
5041  *      s2io_nic structure.
5042  * @ep : pointer to the structure with pause parameters given by ethtool.
5043  * Description:
5044  * It can be used to set or reset Pause frame generation or reception
5045  * support of the NIC.
5046  * Return value:
5047  * int, returns 0 on Success
5048  */
5049
5050 static int s2io_ethtool_setpause_data(struct net_device *dev,
5051                                struct ethtool_pauseparam *ep)
5052 {
5053         u64 val64;
5054         struct s2io_nic *sp = dev->priv;
5055         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5056
5057         val64 = readq(&bar0->rmac_pause_cfg);
5058         if (ep->tx_pause)
5059                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5060         else
5061                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5062         if (ep->rx_pause)
5063                 val64 |= RMAC_PAUSE_RX_ENABLE;
5064         else
5065                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5066         writeq(val64, &bar0->rmac_pause_cfg);
5067         return 0;
5068 }
5069
5070 /**
5071  * read_eeprom - reads 4 bytes of data from user given offset.
5072  * @sp : private member of the device structure, which is a pointer to the
5073  *      s2io_nic structure.
5074  * @off : offset from which the data must be read
5075  * @data : Its an output parameter where the data read at the given
5076  *      offset is stored.
5077  * Description:
5078  * Will read 4 bytes of data from the user given offset and return the
5079  * read data.
5080  * NOTE: Will allow to read only part of the EEPROM visible through the
5081  *   I2C bus.
5082  * Return value:
5083  *  -1 on failure and 0 on success.
5084  */
5085
5086 #define S2IO_DEV_ID             5
5087 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5088 {
5089         int ret = -1;
5090         u32 exit_cnt = 0;
5091         u64 val64;
5092         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5093
        /* Xframe I: EEPROM is reached through the I2C control register.
         * Kick off a 4-byte read and poll (5 x 50ms) for completion. */
5094         if (sp->device_type == XFRAME_I_DEVICE) {
5095                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5096                     I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5097                     I2C_CONTROL_CNTL_START;
5098                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5099
5100                 while (exit_cnt < 5) {
5101                         val64 = readq(&bar0->i2c_control);
5102                         if (I2C_CONTROL_CNTL_END(val64)) {
5103                                 *data = I2C_CONTROL_GET_DATA(val64);
5104                                 ret = 0;
5105                                 break;
5106                         }
5107                         msleep(50);
5108                         exit_cnt++;
5109                 }
5110         }
5111
        /* Xframe II: EEPROM is reached through the SPI control register.
         * The request bit is set with a second write after the command
         * fields are programmed; poll for NACK (ret = 1) or DONE. */
5112         if (sp->device_type == XFRAME_II_DEVICE) {
5113                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5114                         SPI_CONTROL_BYTECNT(0x3) |
5115                         SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5116                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5117                 val64 |= SPI_CONTROL_REQ;
5118                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5119                 while (exit_cnt < 5) {
5120                         val64 = readq(&bar0->spi_control);
5121                         if (val64 & SPI_CONTROL_NACK) {
5122                                 ret = 1;
5123                                 break;
5124                         } else if (val64 & SPI_CONTROL_DONE) {
5125                                 *data = readq(&bar0->spi_data);
                                /* Only the low 3 bytes carry EEPROM data */
5126                                 *data &= 0xffffff;
5127                                 ret = 0;
5128                                 break;
5129                         }
5130                         msleep(50);
5131                         exit_cnt++;
5132                 }
5133         }
5134         return ret;
5135 }
5136
5137 /**
5138  *  write_eeprom - actually writes the relevant part of the data value.
5139  *  @sp : private member of the device structure, which is a pointer to the
5140  *       s2io_nic structure.
5141  *  @off : offset at which the data must be written
5142  *  @data : The data that is to be written
5143  *  @cnt : Number of bytes of the data that are actually to be written into
5144  *  the Eeprom. (max of 3)
5145  * Description:
5146  *  Actually writes the relevant part of the data value into the Eeprom
5147  *  through the I2C bus.
5148  * Return value:
5149  *  0 on success, -1 on failure.
5150  */
5151
5152 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5153 {
5154         int exit_cnt = 0, ret = -1;
5155         u64 val64;
5156         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5157
        /* Xframe I: program the I2C write and poll (5 x 50ms); the write
         * only counts as successful when the controller did not NACK. */
5158         if (sp->device_type == XFRAME_I_DEVICE) {
5159                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5160                     I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5161                     I2C_CONTROL_CNTL_START;
5162                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5163
5164                 while (exit_cnt < 5) {
5165                         val64 = readq(&bar0->i2c_control);
5166                         if (I2C_CONTROL_CNTL_END(val64)) {
5167                                 if (!(val64 & I2C_CONTROL_NACK))
5168                                         ret = 0;
5169                                 break;
5170                         }
5171                         msleep(50);
5172                         exit_cnt++;
5173                 }
5174         }
5175
        /* Xframe II: stage the data in spi_data, then issue the SPI write
         * command and poll for NACK (ret = 1) or DONE (ret = 0). */
5176         if (sp->device_type == XFRAME_II_DEVICE) {
                /* NOTE(review): a cnt of 8 maps to a BYTECNT field of 0 -
                 * presumably the field encodes a full 8-byte transfer as 0;
                 * confirm against the Xframe II SPI register spec. */
5177                 int write_cnt = (cnt == 8) ? 0 : cnt;
5178                 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5179
5180                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5181                         SPI_CONTROL_BYTECNT(write_cnt) |
5182                         SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5183                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5184                 val64 |= SPI_CONTROL_REQ;
5185                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5186                 while (exit_cnt < 5) {
5187                         val64 = readq(&bar0->spi_control);
5188                         if (val64 & SPI_CONTROL_NACK) {
5189                                 ret = 1;
5190                                 break;
5191                         } else if (val64 & SPI_CONTROL_DONE) {
5192                                 ret = 0;
5193                                 break;
5194                         }
5195                         msleep(50);
5196                         exit_cnt++;
5197                 }
5198         }
5199         return ret;
5200 }
5201 static void s2io_vpd_read(struct s2io_nic *nic)
5202 {
5203         u8 *vpd_data;
5204         u8 data;
5205         int i=0, cnt, fail = 0;
5206         int vpd_addr = 0x80;
5207
5208         if (nic->device_type == XFRAME_II_DEVICE) {
5209                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5210                 vpd_addr = 0x80;
5211         }
5212         else {
5213                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5214                 vpd_addr = 0x50;
5215         }
5216         strcpy(nic->serial_num, "NOT AVAILABLE");
5217
5218         vpd_data = kmalloc(256, GFP_KERNEL);
5219         if (!vpd_data) {
5220                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5221                 return;
5222         }
5223         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5224
5225         for (i = 0; i < 256; i +=4 ) {
5226                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5227                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5228                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5229                 for (cnt = 0; cnt <5; cnt++) {
5230                         msleep(2);
5231                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5232                         if (data == 0x80)
5233                                 break;
5234                 }
5235                 if (cnt >= 5) {
5236                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5237                         fail = 1;
5238                         break;
5239                 }
5240                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5241                                       (u32 *)&vpd_data[i]);
5242         }
5243
5244         if(!fail) {
5245                 /* read serial number of adapter */
5246                 for (cnt = 0; cnt < 256; cnt++) {
5247                 if ((vpd_data[cnt] == 'S') &&
5248                         (vpd_data[cnt+1] == 'N') &&
5249                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5250                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5251                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5252                                         vpd_data[cnt+2]);
5253                                 break;
5254                         }
5255                 }
5256         }
5257
5258         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5259                 memset(nic->product_name, 0, vpd_data[1]);
5260                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5261         }
5262         kfree(vpd_data);
5263         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5264 }
5265
5266 /**
5267  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5268  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
5269  *  @eeprom : pointer to the user level structure provided by ethtool,
5270  *  containing all relevant information.
5271  *  @data_buf : user defined value to be written into Eeprom.
5272  *  Description: Reads the values stored in the Eeprom at given offset
5273  *  for a given length. Stores these values int the input argument data
5274  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5275  *  Return value:
5276  *  int  0 on success
5277  */
5278
5279 static int s2io_ethtool_geeprom(struct net_device *dev,
5280                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5281 {
5282         u32 i, valid;
5283         u64 data;
5284         struct s2io_nic *sp = dev->priv;
5285
5286         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5287
5288         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5289                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5290
5291         for (i = 0; i < eeprom->len; i += 4) {
5292                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5293                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5294                         return -EFAULT;
5295                 }
5296                 valid = INV(data);
5297                 memcpy((data_buf + i), &valid, 4);
5298         }
5299         return 0;
5300 }
5301
5302 /**
5303  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5304  *  @sp : private member of the device structure, which is a pointer to the
5305  *  s2io_nic structure.
5306  *  @eeprom : pointer to the user level structure provided by ethtool,
5307  *  containing all relevant information.
5308  *  @data_buf : user defined value to be written into Eeprom.
5309  *  Description:
5310  *  Tries to write the user provided value in the Eeprom, at the offset
5311  *  given by the user.
5312  *  Return value:
5313  *  0 on success, -EFAULT on failure.
5314  */
5315
5316 static int s2io_ethtool_seeprom(struct net_device *dev,
5317                                 struct ethtool_eeprom *eeprom,
5318                                 u8 * data_buf)
5319 {
5320         int len = eeprom->len, cnt = 0;
5321         u64 valid = 0, data;
5322         struct s2io_nic *sp = dev->priv;
5323
        /* Refuse the write unless the caller echoes back the magic value
         * (vendor/device id) that geeprom reported. */
5324         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5325                 DBG_PRINT(ERR_DBG,
5326                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5327                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5328                           eeprom->magic);
5329                 return -EFAULT;
5330         }
5331
        /* Write one byte per iteration */
5332         while (len) {
5333                 data = (u32) data_buf[cnt] & 0x000000FF;
                /* NOTE(review): a non-zero byte is shifted into bits 31..24
                 * and handed to write_eeprom with a byte count of 0 -
                 * presumably matching the I2C_CONTROL_SET_DATA field
                 * layout; confirm against the Xframe EEPROM programming
                 * documentation. */
5334                 if (data) {
5335                         valid = (u32) (data << 24);
5336                 } else
5337                         valid = data;
5338
5339                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5340                         DBG_PRINT(ERR_DBG,
5341                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5342                         DBG_PRINT(ERR_DBG,
5343                                   "write into the specified offset\n");
5344                         return -EFAULT;
5345                 }
5346                 cnt++;
5347                 len--;
5348         }
5349
5350         return 0;
5351 }
5352
5353 /**
5354  * s2io_register_test - reads and writes into all clock domains.
5355  * @sp : private member of the device structure, which is a pointer to the
5356  * s2io_nic structure.
5357  * @data : variable that returns the result of each of the test conducted
5358  * by the driver.
5359  * Description:
5360  * Read and write into all clock domains. The NIC has 3 clock domains,
5361  * see that registers in all the three regions are accessible.
5362  * Return value:
5363  * 0 on success.
5364  */
5365
5366 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5367 {
5368         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5369         u64 val64 = 0, exp_val;
5370         int fail = 0;
5371
        /* Read tests: each register is compared against its fixed,
         * hardware-defined reset value. */
5372         val64 = readq(&bar0->pif_rd_swapper_fb);
5373         if (val64 != 0x123456789abcdefULL) {
5374                 fail = 1;
5375                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5376         }
5377
5378         val64 = readq(&bar0->rmac_pause_cfg);
5379         if (val64 != 0xc000ffff00000000ULL) {
5380                 fail = 1;
5381                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5382         }
5383
        /* rx_queue_cfg reset value differs between Xframe I and II */
5384         val64 = readq(&bar0->rx_queue_cfg);
5385         if (sp->device_type == XFRAME_II_DEVICE)
5386                 exp_val = 0x0404040404040404ULL;
5387         else
5388                 exp_val = 0x0808080808080808ULL;
5389         if (val64 != exp_val) {
5390                 fail = 1;
5391                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5392         }
5393
5394         val64 = readq(&bar0->xgxs_efifo_cfg);
5395         if (val64 != 0x000000001923141EULL) {
5396                 fail = 1;
5397                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5398         }
5399
        /* Write tests: write alternating bit patterns into the scratch
         * xmsi_data register and read them back. */
5400         val64 = 0x5A5A5A5A5A5A5A5AULL;
5401         writeq(val64, &bar0->xmsi_data);
5402         val64 = readq(&bar0->xmsi_data);
5403         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5404                 fail = 1;
5405                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5406         }
5407
5408         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5409         writeq(val64, &bar0->xmsi_data);
5410         val64 = readq(&bar0->xmsi_data);
5411         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5412                 fail = 1;
5413                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5414         }
5415
        /* Report pass/fail to ethtool (0 = pass) */
5416         *data = fail;
5417         return fail;
5418 }
5419
5420 /**
5421  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5422  * @sp : private member of the device structure, which is a pointer to the
5423  * s2io_nic structure.
5424  * @data:variable that returns the result of each of the test conducted by
5425  * the driver.
5426  * Description:
5427  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5428  * register.
5429  * Return value:
5430  * 0 on success.
5431  */
5432
5433 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5434 {
5435         int fail = 0;
5436         u64 ret_data, org_4F0, org_7F0;
5437         u8 saved_4F0 = 0, saved_7F0 = 0;
5438         struct net_device *dev = sp->dev;
5439
5440         /* Test Write Error at offset 0 */
5441         /* Note that SPI interface allows write access to all areas
5442          * of EEPROM. Hence doing all negative testing only for Xframe I.
5443          */
5444         if (sp->device_type == XFRAME_I_DEVICE)
5445                 if (!write_eeprom(sp, 0, 0, 3))
5446                         fail = 1;
5447
5448         /* Save current values at offsets 0x4F0 and 0x7F0 */
5449         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5450                 saved_4F0 = 1;
5451         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5452                 saved_7F0 = 1;
5453
5454         /* Test Write at offset 4f0 */
5455         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5456                 fail = 1;
5457         if (read_eeprom(sp, 0x4F0, &ret_data))
5458                 fail = 1;
5459
5460         if (ret_data != 0x012345) {
5461                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5462                         "Data written %llx Data read %llx\n",
5463                         dev->name, (unsigned long long)0x12345,
5464                         (unsigned long long)ret_data);
5465                 fail = 1;
5466         }
5467
5468         /* Reset the EEPROM data go FFFF */
5469         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5470
5471         /* Test Write Request Error at offset 0x7c */
5472         if (sp->device_type == XFRAME_I_DEVICE)
5473                 if (!write_eeprom(sp, 0x07C, 0, 3))
5474                         fail = 1;
5475
5476         /* Test Write Request at offset 0x7f0 */
5477         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5478                 fail = 1;
5479         if (read_eeprom(sp, 0x7F0, &ret_data))
5480                 fail = 1;
5481
5482         if (ret_data != 0x012345) {
5483                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5484                         "Data written %llx Data read %llx\n",
5485                         dev->name, (unsigned long long)0x12345,
5486                         (unsigned long long)ret_data);
5487                 fail = 1;
5488         }
5489
5490         /* Reset the EEPROM data go FFFF */
5491         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5492
5493         if (sp->device_type == XFRAME_I_DEVICE) {
5494                 /* Test Write Error at offset 0x80 */
5495                 if (!write_eeprom(sp, 0x080, 0, 3))
5496                         fail = 1;
5497
5498                 /* Test Write Error at offset 0xfc */
5499                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5500                         fail = 1;
5501
5502                 /* Test Write Error at offset 0x100 */
5503                 if (!write_eeprom(sp, 0x100, 0, 3))
5504                         fail = 1;
5505
5506                 /* Test Write Error at offset 4ec */
5507                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5508                         fail = 1;
5509         }
5510
5511         /* Restore values at offsets 0x4F0 and 0x7F0 */
5512         if (saved_4F0)
5513                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5514         if (saved_7F0)
5515                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5516
5517         *data = fail;
5518         return fail;
5519 }
5520
5521 /**
5522  * s2io_bist_test - invokes the MemBist test of the card .
5523  * @sp : private member of the device structure, which is a pointer to the
5524  * s2io_nic structure.
5525  * @data:variable that returns the result of each of the test conducted by
5526  * the driver.
5527  * Description:
5528  * This invokes the MemBist test of the card. We give around
5529  * 2 secs time for the Test to complete. If it's still not complete
5530  * within this peiod, we consider that the test failed.
5531  * Return value:
5532  * 0 on success and -1 on failure.
5533  */
5534
5535 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5536 {
5537         u8 bist = 0;
5538         int cnt = 0, ret = -1;
5539
5540         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5541         bist |= PCI_BIST_START;
5542         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5543
5544         while (cnt < 20) {
5545                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5546                 if (!(bist & PCI_BIST_START)) {
5547                         *data = (bist & PCI_BIST_CODE_MASK);
5548                         ret = 0;
5549                         break;
5550                 }
5551                 msleep(100);
5552                 cnt++;
5553         }
5554
5555         return ret;
5556 }
5557
5558 /**
5559  * s2io-link_test - verifies the link state of the nic
5560  * @sp ; private member of the device structure, which is a pointer to the
5561  * s2io_nic structure.
5562  * @data: variable that returns the result of each of the test conducted by
5563  * the driver.
5564  * Description:
5565  * The function verifies the link state of the NIC and updates the input
5566  * argument 'data' appropriately.
5567  * Return value:
5568  * 0 on success.
5569  */
5570
5571 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5572 {
5573         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5574         u64 val64;
5575
5576         val64 = readq(&bar0->adapter_status);
5577         if(!(LINK_IS_UP(val64)))
5578                 *data = 1;
5579         else
5580                 *data = 0;
5581
5582         return *data;
5583 }
5584
5585 /**
5586  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5587  * @sp - private member of the device structure, which is a pointer to the
5588  * s2io_nic structure.
5589  * @data - variable that returns the result of each of the test
5590  * conducted by the driver.
5591  * Description:
5592  *  This is one of the offline test that tests the read and write
5593  *  access to the RldRam chip on the NIC.
5594  * Return value:
5595  *  0 on success.
5596  */
5597
5598 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5599 {
5600         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5601         u64 val64;
5602         int cnt, iteration = 0, test_fail = 0;
5603
        /* Disable ECC so test patterns are not corrected/flagged */
5604         val64 = readq(&bar0->adapter_control);
5605         val64 &= ~ADAPTER_ECC_EN;
5606         writeq(val64, &bar0->adapter_control);
5607
        /* Put the RLDRAM controller into test mode */
5608         val64 = readq(&bar0->mc_rldram_test_ctrl);
5609         val64 |= MC_RLDRAM_TEST_MODE;
5610         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5611
5612         val64 = readq(&bar0->mc_rldram_mrs);
5613         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5614         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5615
5616         val64 |= MC_RLDRAM_MRS_ENABLE;
5617         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5618
        /* Two passes: the second inverts the upper 48 bits of each
         * pattern to exercise both bit polarities. */
5619         while (iteration < 2) {
5620                 val64 = 0x55555555aaaa0000ULL;
5621                 if (iteration == 1) {
5622                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5623                 }
5624                 writeq(val64, &bar0->mc_rldram_test_d0);
5625
5626                 val64 = 0xaaaa5a5555550000ULL;
5627                 if (iteration == 1) {
5628                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5629                 }
5630                 writeq(val64, &bar0->mc_rldram_test_d1);
5631
5632                 val64 = 0x55aaaaaaaa5a0000ULL;
5633                 if (iteration == 1) {
5634                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5635                 }
5636                 writeq(val64, &bar0->mc_rldram_test_d2);
5637
5638                 val64 = (u64) (0x0000003ffffe0100ULL);
5639                 writeq(val64, &bar0->mc_rldram_test_add);
5640
                /* Kick off the write pass, poll (5 x 200ms) for DONE */
5641                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5642                         MC_RLDRAM_TEST_GO;
5643                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5644
5645                 for (cnt = 0; cnt < 5; cnt++) {
5646                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5647                         if (val64 & MC_RLDRAM_TEST_DONE)
5648                                 break;
5649                         msleep(200);
5650                 }
5651
                /* NOTE(review): a poll timeout just breaks out of the
                 * loop without setting test_fail, so a hung controller is
                 * reported as a pass - confirm whether this is intended. */
5652                 if (cnt == 5)
5653                         break;
5654
                /* Kick off the read-back pass, poll (5 x 500ms) for DONE */
5655                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5656                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5657
5658                 for (cnt = 0; cnt < 5; cnt++) {
5659                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5660                         if (val64 & MC_RLDRAM_TEST_DONE)
5661                                 break;
5662                         msleep(500);
5663                 }
5664
5665                 if (cnt == 5)
5666                         break;
5667
5668                 val64 = readq(&bar0->mc_rldram_test_ctrl);
5669                 if (!(val64 & MC_RLDRAM_TEST_PASS))
5670                         test_fail = 1;
5671
5672                 iteration++;
5673         }
5674
5675         *data = test_fail;
5676
5677         /* Bring the adapter out of test mode */
5678         SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5679
5680         return test_fail;
5681 }
5682
5683 /**
5684  *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
5685  *  @sp : private member of the device structure, which is a pointer to the
5686  *  s2io_nic structure.
5687  *  @ethtest : pointer to a ethtool command specific structure that will be
5688  *  returned to the user.
5689  *  @data : variable that returns the result of each of the test
5690  * conducted by the driver.
5691  * Description:
5692  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5693  *  the health of the card.
5694  * Return value:
5695  *  void
5696  */
5697
5698 static void s2io_ethtool_test(struct net_device *dev,
5699                               struct ethtool_test *ethtest,
5700                               uint64_t * data)
5701 {
5702         struct s2io_nic *sp = dev->priv;
5703         int orig_state = netif_running(sp->dev);
5704
5705         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5706                 /* Offline Tests. */
                /* The interface is brought down for the duration of the
                 * offline tests and reopened afterwards. */
5707                 if (orig_state)
5708                         s2io_close(sp->dev);
5709
5710                 if (s2io_register_test(sp, &data[0]))
5711                         ethtest->flags |= ETH_TEST_FL_FAILED;
5712
                /* Reset between tests that leave the adapter in a
                 * non-default state. */
5713                 s2io_reset(sp);
5714
5715                 if (s2io_rldram_test(sp, &data[3]))
5716                         ethtest->flags |= ETH_TEST_FL_FAILED;
5717
5718                 s2io_reset(sp);
5719
5720                 if (s2io_eeprom_test(sp, &data[1]))
5721                         ethtest->flags |= ETH_TEST_FL_FAILED;
5722
5723                 if (s2io_bist_test(sp, &data[4]))
5724                         ethtest->flags |= ETH_TEST_FL_FAILED;
5725
5726                 if (orig_state)
5727                         s2io_open(sp->dev);
5728
                /* Link test (slot 2) is online-only */
5729                 data[2] = 0;
5730         } else {
5731                 /* Online Tests. */
5732                 if (!orig_state) {
5733                         DBG_PRINT(ERR_DBG,
5734                                   "%s: is not up, cannot run test\n",
5735                                   dev->name);
                        /* NOTE(review): these -1 markers are dead stores -
                         * execution falls through and every slot is
                         * overwritten below; presumably an early return was
                         * intended here. Confirm before changing. */
5736                         data[0] = -1;
5737                         data[1] = -1;
5738                         data[2] = -1;
5739                         data[3] = -1;
5740                         data[4] = -1;
5741                 }
5742
5743                 if (s2io_link_test(sp, &data[2]))
5744                         ethtest->flags |= ETH_TEST_FL_FAILED;
5745
                /* Offline-only slots report 0 (not run) in online mode */
5746                 data[0] = 0;
5747                 data[1] = 0;
5748                 data[3] = 0;
5749                 data[4] = 0;
5750         }
5751 }
5752
5753 static void s2io_get_ethtool_stats(struct net_device *dev,
5754                                    struct ethtool_stats *estats,
5755                                    u64 * tmp_stats)
5756 {
5757         int i = 0;
5758         struct s2io_nic *sp = dev->priv;
5759         struct stat_block *stat_info = sp->mac_control.stats_info;
5760
5761         s2io_updt_stats(sp);
5762         tmp_stats[i++] =
5763                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
5764                 le32_to_cpu(stat_info->tmac_frms);
5765         tmp_stats[i++] =
5766                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5767                 le32_to_cpu(stat_info->tmac_data_octets);
5768         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5769         tmp_stats[i++] =
5770                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5771                 le32_to_cpu(stat_info->tmac_mcst_frms);
5772         tmp_stats[i++] =
5773                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5774                 le32_to_cpu(stat_info->tmac_bcst_frms);
5775         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5776         tmp_stats[i++] =
5777                 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5778                 le32_to_cpu(stat_info->tmac_ttl_octets);
5779         tmp_stats[i++] =
5780                 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5781                 le32_to_cpu(stat_info->tmac_ucst_frms);
5782         tmp_stats[i++] =
5783                 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5784                 le32_to_cpu(stat_info->tmac_nucst_frms);
5785         tmp_stats[i++] =
5786                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5787                 le32_to_cpu(stat_info->tmac_any_err_frms);
5788         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5789         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5790         tmp_stats[i++] =
5791                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5792                 le32_to_cpu(stat_info->tmac_vld_ip);
5793         tmp_stats[i++] =
5794                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5795                 le32_to_cpu(stat_info->tmac_drop_ip);
5796         tmp_stats[i++] =
5797                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5798                 le32_to_cpu(stat_info->tmac_icmp);
5799         tmp_stats[i++] =
5800                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5801                 le32_to_cpu(stat_info->tmac_rst_tcp);
5802         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5803         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5804                 le32_to_cpu(stat_info->tmac_udp);
5805         tmp_stats[i++] =
5806                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5807                 le32_to_cpu(stat_info->rmac_vld_frms);
5808         tmp_stats[i++] =
5809                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5810                 le32_to_cpu(stat_info->rmac_data_octets);
5811         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5812         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5813         tmp_stats[i++] =
5814                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5815                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5816         tmp_stats[i++] =
5817                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5818                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5819         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5820         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5821         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5822         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5823         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5824         tmp_stats[i++] =
5825                 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5826                 le32_to_cpu(stat_info->rmac_ttl_octets);
5827         tmp_stats[i++] =
5828                 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5829                 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5830         tmp_stats[i++] =
5831                 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5832                  << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5833         tmp_stats[i++] =
5834                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5835                 le32_to_cpu(stat_info->rmac_discarded_frms);
5836         tmp_stats[i++] =
5837                 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5838                  << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5839         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5840         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5841         tmp_stats[i++] =
5842                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5843                 le32_to_cpu(stat_info->rmac_usized_frms);
5844         tmp_stats[i++] =
5845                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5846                 le32_to_cpu(stat_info->rmac_osized_frms);
5847         tmp_stats[i++] =
5848                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5849                 le32_to_cpu(stat_info->rmac_frag_frms);
5850         tmp_stats[i++] =
5851                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5852                 le32_to_cpu(stat_info->rmac_jabber_frms);
5853         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5854         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5855         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5856         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5857         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5858         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5859         tmp_stats[i++] =
5860                 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5861                 le32_to_cpu(stat_info->rmac_ip);
5862         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5863         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5864         tmp_stats[i++] =
5865                 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5866                 le32_to_cpu(stat_info->rmac_drop_ip);
5867         tmp_stats[i++] =
5868                 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5869                 le32_to_cpu(stat_info->rmac_icmp);
5870         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5871         tmp_stats[i++] =
5872                 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5873                 le32_to_cpu(stat_info->rmac_udp);
5874         tmp_stats[i++] =
5875                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5876                 le32_to_cpu(stat_info->rmac_err_drp_udp);
5877         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5878         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5879         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5880         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5881         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5882         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5883         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5884         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5885         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5886         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5887         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5888         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5889         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5890         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5891         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5892         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5893         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5894         tmp_stats[i++] =
5895                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5896                 le32_to_cpu(stat_info->rmac_pause_cnt);
5897         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5898         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5899         tmp_stats[i++] =
5900                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5901                 le32_to_cpu(stat_info->rmac_accepted_ip);
5902         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5903         tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5904         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5905         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5906         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5907         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5908         tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5909         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5910         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5911         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5912         tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5913         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5914         tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5915         tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5916         tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5917         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5918         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5919         tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5920         tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5921
5922         /* Enhanced statistics exist only for Hercules */
5923         if(sp->device_type == XFRAME_II_DEVICE) {
5924                 tmp_stats[i++] =
5925                                 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5926                 tmp_stats[i++] =
5927                                 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5928                 tmp_stats[i++] =
5929                                 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5930                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5931                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5932                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5933                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5934                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5935                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5936                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5937                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5938                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5939                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5940                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5941                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5942                 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5943         }
5944
5945         tmp_stats[i++] = 0;
5946         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5947         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5948         tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5949         tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5950         tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5951         tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5952         tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5953         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5954         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5955         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5956         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5957         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5958         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5959         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5960         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5961         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5962         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5963         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5964         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5965         tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5966         tmp_stats[i++] = stat_info->sw_stat.sending_both;
5967         tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5968         tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5969         if (stat_info->sw_stat.num_aggregations) {
5970                 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5971                 int count = 0;
5972                 /*
5973                  * Since 64-bit divide does not work on all platforms,
5974                  * do repeated subtraction.
5975                  */
5976                 while (tmp >= stat_info->sw_stat.num_aggregations) {
5977                         tmp -= stat_info->sw_stat.num_aggregations;
5978                         count++;
5979                 }
5980                 tmp_stats[i++] = count;
5981         }
5982         else
5983                 tmp_stats[i++] = 0;
5984         tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
5985         tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
5986         tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
5987         tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
5988         tmp_stats[i++] = stat_info->sw_stat.mem_freed;
5989         tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
5990         tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
5991         tmp_stats[i++] = stat_info->sw_stat.link_up_time;
5992         tmp_stats[i++] = stat_info->sw_stat.link_down_time;
5993
5994         tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
5995         tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
5996         tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
5997         tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
5998         tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
5999
6000         tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6001         tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6002         tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6003         tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6004         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6005         tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6006         tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6007         tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6008         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6009 }
6010
6011 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6012 {
6013         return (XENA_REG_SPACE);
6014 }
6015
6016
6017 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6018 {
6019         struct s2io_nic *sp = dev->priv;
6020
6021         return (sp->rx_csum);
6022 }
6023
6024 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6025 {
6026         struct s2io_nic *sp = dev->priv;
6027
6028         if (data)
6029                 sp->rx_csum = 1;
6030         else
6031                 sp->rx_csum = 0;
6032
6033         return 0;
6034 }
6035
6036 static int s2io_get_eeprom_len(struct net_device *dev)
6037 {
6038         return (XENA_EEPROM_SPACE);
6039 }
6040
6041 static int s2io_ethtool_self_test_count(struct net_device *dev)
6042 {
6043         return (S2IO_TEST_LEN);
6044 }
6045
6046 static void s2io_ethtool_get_strings(struct net_device *dev,
6047                                      u32 stringset, u8 * data)
6048 {
6049         int stat_size = 0;
6050         struct s2io_nic *sp = dev->priv;
6051
6052         switch (stringset) {
6053         case ETH_SS_TEST:
6054                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6055                 break;
6056         case ETH_SS_STATS:
6057                 stat_size = sizeof(ethtool_xena_stats_keys);
6058                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6059                 if(sp->device_type == XFRAME_II_DEVICE) {
6060                         memcpy(data + stat_size,
6061                                 &ethtool_enhanced_stats_keys,
6062                                 sizeof(ethtool_enhanced_stats_keys));
6063                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6064                 }
6065
6066                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6067                         sizeof(ethtool_driver_stats_keys));
6068         }
6069 }
6070 static int s2io_ethtool_get_stats_count(struct net_device *dev)
6071 {
6072         struct s2io_nic *sp = dev->priv;
6073         int stat_count = 0;
6074         switch(sp->device_type) {
6075         case XFRAME_I_DEVICE:
6076                 stat_count = XFRAME_I_STAT_LEN;
6077         break;
6078
6079         case XFRAME_II_DEVICE:
6080                 stat_count = XFRAME_II_STAT_LEN;
6081         break;
6082         }
6083
6084         return stat_count;
6085 }
6086
6087 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6088 {
6089         if (data)
6090                 dev->features |= NETIF_F_IP_CSUM;
6091         else
6092                 dev->features &= ~NETIF_F_IP_CSUM;
6093
6094         return 0;
6095 }
6096
6097 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6098 {
6099         return (dev->features & NETIF_F_TSO) != 0;
6100 }
6101 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6102 {
6103         if (data)
6104                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6105         else
6106                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6107
6108         return 0;
6109 }
6110
/*
 * ethtool entry points for this driver.  Generic ethtool_op_* helpers
 * are used where no hardware-specific handling is needed; everything
 * else is implemented above in this file.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
};
6141
/**
 *  s2io_ioctl - Entry point for the Ioctl
 *  @dev :  Device pointer.
 *  @rq :   An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd :  This is used to distinguish between the different commands that
 *  can be passed to the IOCTL functions.
 *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  the function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
6158
6159 /**
6160  *  s2io_change_mtu - entry point to change MTU size for the device.
6161  *   @dev : device pointer.
6162  *   @new_mtu : the new MTU size for the device.
6163  *   Description: A driver entry point to change MTU size for the device.
6164  *   Before changing the MTU the device must be stopped.
6165  *  Return value:
6166  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6167  *   file on failure.
6168  */
6169
6170 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6171 {
6172         struct s2io_nic *sp = dev->priv;
6173
6174         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6175                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6176                           dev->name);
6177                 return -EPERM;
6178         }
6179
6180         dev->mtu = new_mtu;
6181         if (netif_running(dev)) {
6182                 s2io_card_down(sp);
6183                 netif_stop_queue(dev);
6184                 if (s2io_card_up(sp)) {
6185                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6186                                   __FUNCTION__);
6187                 }
6188                 if (netif_queue_stopped(dev))
6189                         netif_wake_queue(dev);
6190         } else { /* Device is down */
6191                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6192                 u64 val64 = new_mtu;
6193
6194                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6195         }
6196
6197         return 0;
6198 }
6199
/**
 *  s2io_tasklet - Bottom half of the ISR.
 *  @dev_addr : address of the device structure in dma_addr_t format.
 *  Description:
 *  This is the tasklet or the bottom half of the ISR. This is
 *  an extension of the ISR which is scheduled by the scheduler to be run
 *  when the load on the CPU is low. All low priority tasks of the ISR can
 *  be pushed into the tasklet. For now the tasklet is used only to
 *  replenish the Rx buffers in the Rx buffer descriptors.
 *  Return value:
 *  void.
 */

static void s2io_tasklet(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *) dev_addr;
	struct s2io_nic *sp = dev->priv;
	int i, ret;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* NOTE(review): TASKLET_IN_USE presumably test-and-sets bit 0 of
	 * sp->tasklet_status so only one refill runs at a time (the
	 * matching clear_bit below releases it) -- macro body not visible
	 * here, confirm. */
	if (!TASKLET_IN_USE) {
		for (i = 0; i < config->rx_ring_num; i++) {
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				/* Out of memory: give up for now; the next
				 * tasklet run will retry the refill. */
				DBG_PRINT(INFO_DBG, "%s: Out of ",
					  dev->name);
				DBG_PRINT(INFO_DBG, "memory in tasklet\n");
				break;
			} else if (ret == -EFILL) {
				/* Ring is already full -- nothing to do. */
				DBG_PRINT(INFO_DBG,
					  "%s: Rx Ring %d is full\n",
					  dev->name, i);
				break;
			}
		}
		clear_bit(0, (&sp->tasklet_status));
	}
}
6242
/**
 * s2io_set_link - Set the Link status
 * @work: work_struct embedded in the s2io_nic private structure
 * Description: Sets the link status for the adapter, enabling the
 * adapter and driving the link LED (or GPIO 0 on cards with faulty
 * link indicators) to match the hardware link state.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	/* Interface went down while this work was queued; nothing to do. */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(0, &(nic->link_state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter if it is not enabled yet; this is
		 * only safe once the device is quiescent. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/* Cards with broken link LEDs signal link
				 * through GPIO 0 instead of the LED bit. */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read back to flush the write */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				netif_stop_queue(dev);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		/* Link is down: clear the GPIO indicator if used ... */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 &(~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(0, &(nic->link_state));

out_unlock:
	rtnl_unlock();
}
6323
/*
 * set_rxd_buffer_pointer - (re)attach DMA-mapped buffers to one RxD.
 * @sp: device private structure
 * @rxdp: descriptor to populate (only touched if Host_Control is NULL)
 * @ba: buffer-address bookkeeping, used in 2-buffer (3B) mode only
 * @skb: in/out - one skb shared across descriptors; allocated on first
 *       use, then its mappings are reused for later descriptors since
 *       the frames are not going to be processed
 * @temp0, @temp1, @temp2: in/out - cached DMA addresses of the shared
 *       skb's buffers, reused for subsequent descriptors
 * @size: skb allocation size, mode-dependent, computed by the caller
 *
 * Returns 0 on success, -ENOMEM if skb allocation or a PCI mapping
 * fails; on a mapping failure any mappings already made for this
 * descriptor are unwound and the skb is freed.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated 
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			/* 0 / DMA_ERROR_CODE indicate a failed mapping */
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the three cached mappings of the shared skb */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated 
				+= (*skb)->truesize;
			/* Buffer 2 carries the payload (mtu + 4 bytes) */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer2_ptr == 0) ||
				(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			/* Buffer 0 holds the header area */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
				/* unwind the buffer-2 mapping made above */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer1_ptr == 0) ||
				(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
				/* unwind both earlier mappings */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	memalloc_failed:
		/* Common exit for PCI mapping failures: count it and
		 * release the skb allocated above. */
		stats->pci_map_fail_cnt++;
		stats->mem_freed += (*skb)->truesize;
		dev_kfree_skb(*skb);
		return -ENOMEM;
}
6431
6432 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6433                                 int size)
6434 {
6435         struct net_device *dev = sp->dev;
6436         if (sp->rxd_mode == RXD_MODE_1) {
6437                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6438         } else if (sp->rxd_mode == RXD_MODE_3B) {
6439                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6440                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6441                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6442         }
6443 }
6444
/*
 * rxd_owner_bit_reset - re-arm every receive descriptor for the hardware.
 * @sp: device private structure
 *
 * Walks all RxDs in all configured rings, (re)attaches buffers through
 * set_rxd_buffer_pointer(), programs the per-mode buffer sizes and then
 * hands each descriptor back to the NIC by setting RXD_OWN_XENA.  Used
 * by do_s2io_card_down() so the adapter can reach a quiescent state
 * without bumping the rings.
 *
 * Returns 0 in all cases; an allocation failure from
 * set_rxd_buffer_pointer() just stops the walk early.
 * NOTE(review): the comparison against ENOMEM (positive) looks
 * suspicious -- confirm whether set_rxd_buffer_pointer() returns
 * -ENOMEM, in which case this check never matches.
 */
static  int rxd_owner_bit_reset(struct s2io_nic *sp)
{
        int i, j, k, blk_cnt = 0, size;
        struct mac_info * mac_control = &sp->mac_control;
        struct config_param *config = &sp->config;
        struct net_device *dev = sp->dev;
        struct RxD_t *rxdp = NULL;
        struct sk_buff *skb = NULL;
        struct buffAdd *ba = NULL;
        u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

        /* Calculate the size based on ring mode */
        size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
                HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
        if (sp->rxd_mode == RXD_MODE_1)
                size += NET_IP_ALIGN;
        else if (sp->rxd_mode == RXD_MODE_3B)
                size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

        for (i = 0; i < config->rx_ring_num; i++) {
                /* Descriptors per block, plus one spare per block */
                blk_cnt = config->rx_cfg[i].num_rxd /
                        (rxd_count[sp->rxd_mode] +1);

                for (j = 0; j < blk_cnt; j++) {
                        for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
                                rxdp = mac_control->rings[i].
                                        rx_blocks[j].rxds[k].virt_addr;
                                if(sp->rxd_mode == RXD_MODE_3B)
                                        ba = &mac_control->rings[i].ba[j][k];
                                if (set_rxd_buffer_pointer(sp, rxdp, ba,
                                                       &skb,(u64 *)&temp0_64,
                                                       (u64 *)&temp1_64,
                                                       (u64 *)&temp2_64,
                                                        size) == ENOMEM) {
                                        return 0;
                                }

                                set_rxd_buffer_size(sp, rxdp, size);
                                /* Descriptor must be fully written before
                                 * ownership passes to the NIC. */
                                wmb();
                                /* flip the Ownership bit to Hardware */
                                rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                }
        }
        return 0;

}
6492
/*
 * s2io_add_isr - allocate interrupt vectors and register the handlers.
 * @sp: device private structure
 *
 * If MSI-X was requested, tries to enable it via s2io_enable_msi_x()
 * and falls back to INTA on failure.  The MSI-X table is then cached
 * with store_xmsi_data().  For MSI-X, one vector is registered per
 * in-use entry (fifo handlers for TX-type entries, ring handlers for
 * RX-type entries); for INTA, a single shared line is requested.
 *
 * Returns 0 on success, -1 if any request_irq() fails.
 * NOTE(review): on an MSI-X registration failure, vectors that were
 * already registered in earlier loop iterations are not freed here --
 * verify the caller unwinds them (s2io_card_up() calls s2io_rem_isr()
 * in its error path).
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
        int ret = 0;
        struct net_device *dev = sp->dev;
        int err = 0;

        if (sp->intr_type == MSI_X)
                ret = s2io_enable_msi_x(sp);
        if (ret) {
                DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
                sp->intr_type = INTA;
        }

        /* Store the values of the MSIX table in the struct s2io_nic structure */
        store_xmsi_data(sp);

        /* After proper initialization of H/W, register ISR */
        if (sp->intr_type == MSI_X) {
                int i, msix_tx_cnt=0,msix_rx_cnt=0;

                /* Entry 0 is the alarm vector; data vectors start at 1 and
                 * the in_use flag delimits the populated entries. */
                for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
                        if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
                                sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
                                        dev->name, i);
                                err = request_irq(sp->entries[i].vector,
                                          s2io_msix_fifo_handle, 0, sp->desc[i],
                                                  sp->s2io_entries[i].arg);
                                /* If either data or addr is zero print it */
                                if(!(sp->msix_info[i].addr &&
                                        sp->msix_info[i].data)) {
                                        DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
                                                "Data:0x%lx\n",sp->desc[i],
                                                (unsigned long long)
                                                sp->msix_info[i].addr,
                                                (unsigned long)
                                                ntohl(sp->msix_info[i].data));
                                } else {
                                        msix_tx_cnt++;
                                }
                        } else {
                                sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
                                        dev->name, i);
                                err = request_irq(sp->entries[i].vector,
                                          s2io_msix_ring_handle, 0, sp->desc[i],
                                                  sp->s2io_entries[i].arg);
                                /* If either data or addr is zero print it */
                                if(!(sp->msix_info[i].addr &&
                                        sp->msix_info[i].data)) {
                                        DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
                                                "Data:0x%lx\n",sp->desc[i],
                                                (unsigned long long)
                                                sp->msix_info[i].addr,
                                                (unsigned long)
                                                ntohl(sp->msix_info[i].data));
                                } else {
                                        msix_rx_cnt++;
                                }
                        }
                        if (err) {
                                DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
                                          "failed\n", dev->name, i);
                                DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
                                return -1;
                        }
                        sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
                }
                printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
                printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
        }
        if (sp->intr_type == INTA) {
                err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
                                sp->name, dev);
                if (err) {
                        DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
                                  dev->name);
                        return -1;
                }
        }
        return 0;
}
/*
 * s2io_rem_isr - free all registered interrupt handlers and vectors.
 * @sp: device private structure
 *
 * For MSI-X: frees each successfully registered vector, releases the
 * entry tables (accounting the freed memory in sw_stat), clears the
 * MSI enable bit in PCI config space and disables MSI-X.  For INTA:
 * frees the single shared line.  Finally polls isr_cnt for up to
 * ~50 ms so in-flight handlers can drain before teardown proceeds.
 *
 * NOTE(review): 0x42 is used as a raw config-space offset for the MSI
 * control word -- presumably the MSI capability location on this
 * device; confirm against the adapter's config-space layout.
 */
static void s2io_rem_isr(struct s2io_nic * sp)
{
        int cnt = 0;
        struct net_device *dev = sp->dev;
        struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

        if (sp->intr_type == MSI_X) {
                int i;
                u16 msi_control;

                /* Entry 0 is the alarm vector; only entries marked as
                 * successfully registered are freed. */
                for (i=1; (sp->s2io_entries[i].in_use ==
                        MSIX_REGISTERED_SUCCESS); i++) {
                        int vector = sp->entries[i].vector;
                        void *arg = sp->s2io_entries[i].arg;

                        free_irq(vector, arg);
                }

                kfree(sp->entries);
                stats->mem_freed +=
                        (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
                kfree(sp->s2io_entries);
                stats->mem_freed +=
                        (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
                sp->entries = NULL;
                sp->s2io_entries = NULL;

                pci_read_config_word(sp->pdev, 0x42, &msi_control);
                msi_control &= 0xFFFE; /* Disable MSI */
                pci_write_config_word(sp->pdev, 0x42, msi_control);

                pci_disable_msix(sp->pdev);
        } else {
                free_irq(sp->pdev->irq, dev);
        }
        /* Waiting till all Interrupt handlers are complete */
        cnt = 0;
        do {
                msleep(10);
                if (!atomic_read(&sp->isr_cnt))
                        break;
                cnt++;
        } while(cnt < 5);
}
6617
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp:    device private structure
 * @do_io: non-zero when the hardware is reachable and should actually
 *         be touched (stop traffic, wait for quiescence, reset); zero
 *         to only tear down software state.
 *
 * Serializes against s2io_set_link via the link_state bit, stops NIC
 * traffic, unregisters interrupts, kills the tasklet, waits for the
 * hardware to quiesce (re-arming RxDs each iteration so the rings do
 * not bump), resets the NIC and frees all Tx/Rx buffers under their
 * respective locks.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
        int cnt = 0;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        unsigned long flags;
        register u64 val64 = 0;

        del_timer_sync(&sp->alarm_timer);
        /* If s2io_set_link task is executing, wait till it completes. */
        while (test_and_set_bit(0, &(sp->link_state))) {
                msleep(50);
        }
        atomic_set(&sp->card_state, CARD_DOWN);

        /* disable Tx and Rx traffic on the NIC */
        if (do_io)
                stop_nic(sp);

        s2io_rem_isr(sp);

        /* Kill tasklet. */
        tasklet_kill(&sp->task);

        /* Check if the device is Quiescent and then Reset the NIC */
        while(do_io) {
                /* As per the HW requirement we need to replenish the
                 * receive buffer to avoid the ring bump. Since there is
                 * no intention of processing the Rx frame at this point
                 * we are just setting the ownership bit of rxd in each
                 * Rx ring to HW and set the appropriate buffer size
                 * based on the ring mode
                 */
                rxd_owner_bit_reset(sp);

                val64 = readq(&bar0->adapter_status);
                if (verify_xena_quiescence(sp)) {
                        if(verify_pcc_quiescent(sp, sp->device_enabled_once))
                        break;
                }

                /* Not quiescent yet: give it up to 10 * 50 ms */
                msleep(50);
                cnt++;
                if (cnt == 10) {
                        DBG_PRINT(ERR_DBG,
                                  "s2io_close:Device not Quiescent ");
                        DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
                                  (unsigned long long) val64);
                        break;
                }
        }
        if (do_io)
                s2io_reset(sp);

        spin_lock_irqsave(&sp->tx_lock, flags);
        /* Free all Tx buffers */
        free_tx_buffers(sp);
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        /* Free all Rx buffers */
        spin_lock_irqsave(&sp->rx_lock, flags);
        free_rx_buffers(sp);
        spin_unlock_irqrestore(&sp->rx_lock, flags);

        /* Allow a pending s2io_set_link to proceed again */
        clear_bit(0, &(sp->link_state));
}
6683
/* s2io_card_down - full card shutdown including hardware I/O. */
static void s2io_card_down(struct s2io_nic * sp)
{
        do_s2io_card_down(sp, 1);
}
6688
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure
 *
 * Initializes the hardware registers, fills the Rx rings, restores the
 * pre-open receive mode (promiscuous / all-multi flags are cleared and
 * re-derived via s2io_set_multicast()), configures LRO limits, starts
 * the NIC, registers interrupts, arms the alarm timer and tasklet, and
 * finally enables the interrupt sources appropriate for the configured
 * interrupt type.
 *
 * Returns 0 on success, -ENOMEM if Rx buffer allocation fails, or
 * -ENODEV on any hardware init/start/ISR-registration failure.  Every
 * error path resets the NIC and releases whatever was set up.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
        int i, ret = 0;
        struct mac_info *mac_control;
        struct config_param *config;
        struct net_device *dev = (struct net_device *) sp->dev;
        u16 interruptible;

        /* Initialize the H/W I/O registers */
        if (init_nic(sp) != 0) {
                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
                          dev->name);
                s2io_reset(sp);
                return -ENODEV;
        }

        /*
         * Initializing the Rx buffers. For now we are considering only 1
         * Rx ring and initializing buffers into 30 Rx blocks
         */
        mac_control = &sp->mac_control;
        config = &sp->config;

        for (i = 0; i < config->rx_ring_num; i++) {
                if ((ret = fill_rx_buffers(sp, i))) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                                  dev->name);
                        s2io_reset(sp);
                        free_rx_buffers(sp);
                        return -ENOMEM;
                }
                DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
                          atomic_read(&sp->rx_bufs_left[i]));
        }
        /* Maintain the state prior to the open */
        if (sp->promisc_flg)
                sp->promisc_flg = 0;
        if (sp->m_cast_flg) {
                sp->m_cast_flg = 0;
                sp->all_multi_pos= 0;
        }

        /* Setting its receive mode */
        s2io_set_multicast(dev);

        if (sp->lro) {
                /* Initialize max aggregatable pkts per session based on MTU */
                sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
                /* Check if we can use(if specified) user provided value */
                if (lro_max_pkts < sp->lro_max_aggr_per_sess)
                        sp->lro_max_aggr_per_sess = lro_max_pkts;
        }

        /* Enable Rx Traffic and interrupts on the NIC */
        if (start_nic(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
                s2io_reset(sp);
                free_rx_buffers(sp);
                return -ENODEV;
        }

        /* Add interrupt service routine */
        if (s2io_add_isr(sp) != 0) {
                if (sp->intr_type == MSI_X)
                        s2io_rem_isr(sp);
                s2io_reset(sp);
                free_rx_buffers(sp);
                return -ENODEV;
        }

        /* Alarm handler fires every half second */
        S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

        /* Enable tasklet for the device */
        tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

        /*  Enable select interrupts */
        en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
        if (sp->intr_type != INTA)
                en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
        else {
                interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
                interruptible |= TX_PIC_INTR;
                en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
        }


        atomic_set(&sp->card_state, CARD_UP);
        return 0;
}
6778
6779 /**
6780  * s2io_restart_nic - Resets the NIC.
6781  * @data : long pointer to the device private structure
6782  * Description:
6783  * This function is scheduled to be run by the s2io_tx_watchdog
6784  * function after 0.5 secs to reset the NIC. The idea is to reduce
6785  * the run time of the watch dog routine which is run holding a
6786  * spin lock.
6787  */
6788
6789 static void s2io_restart_nic(struct work_struct *work)
6790 {
6791         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6792         struct net_device *dev = sp->dev;
6793
6794         rtnl_lock();
6795
6796         if (!netif_running(dev))
6797                 goto out_unlock;
6798
6799         s2io_card_down(sp);
6800         if (s2io_card_up(sp)) {
6801                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6802                           dev->name);
6803         }
6804         netif_wake_queue(dev);
6805         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6806                   dev->name);
6807 out_unlock:
6808         rtnl_unlock();
6809 }
6810
6811 /**
6812  *  s2io_tx_watchdog - Watchdog for transmit side.
6813  *  @dev : Pointer to net device structure
6814  *  Description:
6815  *  This function is triggered if the Tx Queue is stopped
6816  *  for a pre-defined amount of time when the Interface is still up.
6817  *  If the Interface is jammed in such a situation, the hardware is
6818  *  reset (by s2io_close) and restarted again (by s2io_open) to
6819  *  overcome any problem that might have been caused in the hardware.
6820  *  Return value:
6821  *  void
6822  */
6823
6824 static void s2io_tx_watchdog(struct net_device *dev)
6825 {
6826         struct s2io_nic *sp = dev->priv;
6827
6828         if (netif_carrier_ok(dev)) {
6829                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6830                 schedule_work(&sp->rst_timer_task);
6831                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6832         }
6833 }
6834
6835 /**
6836  *   rx_osm_handler - To perform some OS related operations on SKB.
6837  *   @sp: private member of the device structure,pointer to s2io_nic structure.
6838  *   @skb : the socket buffer pointer.
6839  *   @len : length of the packet
6840  *   @cksum : FCS checksum of the frame.
6841  *   @ring_no : the ring from which this RxD was extracted.
6842  *   Description:
6843  *   This function is called by the Rx interrupt serivce routine to perform
6844  *   some OS related operations on the SKB before passing it to the upper
6845  *   layers. It mainly checks if the checksum is OK, if so adds it to the
6846  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
6847  *   to the upper layer. If the checksum is wrong, it increments the Rx
6848  *   packet error count, frees the SKB and returns error.
6849  *   Return value:
6850  *   SUCCESS on success and -1 on failure.
6851  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
        struct s2io_nic *sp = ring_data->nic;
        struct net_device *dev = (struct net_device *) sp->dev;
        /* The skb pointer was stashed in Host_Control when the RxD was
         * armed by the fill routine. */
        struct sk_buff *skb = (struct sk_buff *)
                ((unsigned long) rxdp->Host_Control);
        int ring_no = ring_data->ring_no;
        u16 l3_csum, l4_csum;
        unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
        struct lro *lro;
        u8 err_mask;

        skb->dev = dev;

        if (err) {
                /* Check for parity error */
                if (err & 0x1) {
                        sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
                }
                /* Transfer code occupies the high nibble of the masked
                 * field; bucket it into the per-cause Rx error stats. */
                err_mask = err >> 48;
                switch(err_mask) {
                        case 1:
                                sp->mac_control.stats_info->sw_stat.
                                rx_parity_err_cnt++;
                        break;

                        case 2:
                                sp->mac_control.stats_info->sw_stat.
                                rx_abort_cnt++;
                        break;

                        case 3:
                                sp->mac_control.stats_info->sw_stat.
                                rx_parity_abort_cnt++;
                        break;

                        case 4:
                                sp->mac_control.stats_info->sw_stat.
                                rx_rda_fail_cnt++;
                        break;

                        case 5:
                                sp->mac_control.stats_info->sw_stat.
                                rx_unkn_prot_cnt++;
                        break;

                        case 6:
                                sp->mac_control.stats_info->sw_stat.
                                rx_fcs_err_cnt++;
                        break;

                        case 7:
                                sp->mac_control.stats_info->sw_stat.
                                rx_buf_size_err_cnt++;
                        break;

                        case 8:
                                sp->mac_control.stats_info->sw_stat.
                                rx_rxd_corrupt_cnt++;
                        break;

                        case 15:
                                sp->mac_control.stats_info->sw_stat.
                                rx_unkn_err_cnt++;
                        break;
                }
                /*
                * Drop the packet if bad transfer code. Exception being
                * 0x5, which could be due to unsupported IPv6 extension header.
                * In this case, we let stack handle the packet.
                * Note that in this case, since checksum will be incorrect,
                * stack will validate the same.
                */
                if (err_mask != 0x5) {
                        DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
                                dev->name, err_mask);
                        sp->stats.rx_crc_errors++;
                        sp->mac_control.stats_info->sw_stat.mem_freed 
                                += skb->truesize;
                        dev_kfree_skb(skb);
                        atomic_dec(&sp->rx_bufs_left[ring_no]);
                        rxdp->Host_Control = 0;
                        return 0;
                }
        }

        /* Updating statistics */
        sp->stats.rx_packets++;
        rxdp->Host_Control = 0;
        if (sp->rxd_mode == RXD_MODE_1) {
                /* 1-buffer mode: whole frame is in the single buffer */
                int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

                sp->stats.rx_bytes += len;
                skb_put(skb, len);

        } else if (sp->rxd_mode == RXD_MODE_3B) {
                /* 3-buffer mode: copy the header area (buffer 0) in front
                 * of the payload that the NIC DMAed into buffer 2. */
                int get_block = ring_data->rx_curr_get_info.block_index;
                int get_off = ring_data->rx_curr_get_info.offset;
                int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
                int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
                unsigned char *buff = skb_push(skb, buf0_len);

                struct buffAdd *ba = &ring_data->ba[get_block][get_off];
                sp->stats.rx_bytes += buf0_len + buf2_len;
                memcpy(buff, ba->ba_0, buf0_len);
                skb_put(skb, buf2_len);
        }

        /* Hardware checksum offload: only trusted for non-fragmented
         * TCP/UDP frames (LRO cannot handle IP fragments) and only when
         * rx_csum is enabled. */
        if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
            (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
            (sp->rx_csum)) {
                l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
                l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
                if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
                        /*
                         * NIC verifies if the Checksum of the received
                         * frame is Ok or not and accordingly returns
                         * a flag in the RxD.
                         */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        if (sp->lro) {
                                u32 tcp_len;
                                u8 *tcp;
                                int ret = 0;

                                /* Classify this segment against the open
                                 * LRO sessions; ret selects the action. */
                                ret = s2io_club_tcp_session(skb->data, &tcp,
                                                &tcp_len, &lro, rxdp, sp);
                                switch (ret) {
                                        case 3: /* Begin anew */
                                                lro->parent = skb;
                                                goto aggregate;
                                        case 1: /* Aggregate */
                                        {
                                                lro_append_pkt(sp, lro,
                                                        skb, tcp_len);
                                                goto aggregate;
                                        }
                                        case 4: /* Flush session */
                                        {
                                                lro_append_pkt(sp, lro,
                                                        skb, tcp_len);
                                                queue_rx_frame(lro->parent);
                                                clear_lro_session(lro);
                                                sp->mac_control.stats_info->
                                                    sw_stat.flush_max_pkts++;
                                                goto aggregate;
                                        }
                                        case 2: /* Flush both */
                                                lro->parent->data_len =
                                                        lro->frags_len;
                                                sp->mac_control.stats_info->
                                                     sw_stat.sending_both++;
                                                queue_rx_frame(lro->parent);
                                                clear_lro_session(lro);
                                                goto send_up;
                                        case 0: /* sessions exceeded */
                                        case -1: /* non-TCP or not
                                                  * L2 aggregatable
                                                  */
                                        case 5: /*
                                                 * First pkt in session not
                                                 * L3/L4 aggregatable
                                                 */
                                                break;
                                        default:
                                                DBG_PRINT(ERR_DBG,
                                                        "%s: Samadhana!!\n",
                                                         __FUNCTION__);
                                                BUG();
                                }
                        }
                } else {
                        /*
                         * Packet with erroneous checksum, let the
                         * upper layers deal with it.
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                }
        } else {
                skb->ip_summed = CHECKSUM_NONE;
        }
        sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
        if (!sp->lro) {
                skb->protocol = eth_type_trans(skb, dev);
                if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
                        vlan_strip_flag)) {
                        /* Queueing the vlan frame to the upper layer */
                        if (napi)
                                vlan_hwaccel_receive_skb(skb, sp->vlgrp,
                                        RXD_GET_VLAN_TAG(rxdp->Control_2));
                        else
                                vlan_hwaccel_rx(skb, sp->vlgrp,
                                        RXD_GET_VLAN_TAG(rxdp->Control_2));
                } else {
                        if (napi)
                                netif_receive_skb(skb);
                        else
                                netif_rx(skb);
                }
        } else {
send_up:
                queue_rx_frame(skb);
        }
        dev->last_rx = jiffies;
aggregate:
        atomic_dec(&sp->rx_bufs_left[ring_no]);
        return SUCCESS;
}
7060
7061 /**
7062  *  s2io_link - stops/starts the Tx queue.
7063  *  @sp : private member of the device structure, which is a pointer to the
7064  *  s2io_nic structure.
7065  *  @link : inidicates whether link is UP/DOWN.
7066  *  Description:
7067  *  This function stops/starts the Tx queue depending on whether the link
7068  *  status of the NIC is is down or up. This is called by the Alarm
7069  *  interrupt handler whenever a link change interrupt comes up.
7070  *  Return value:
7071  *  void.
7072  */
7073
7074 static void s2io_link(struct s2io_nic * sp, int link)
7075 {
7076         struct net_device *dev = (struct net_device *) sp->dev;
7077
7078         if (link != sp->last_link_state) {
7079                 if (link == LINK_DOWN) {
7080                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7081                         netif_carrier_off(dev);
7082                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7083                         sp->mac_control.stats_info->sw_stat.link_up_time = 
7084                                 jiffies - sp->start_time;
7085                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7086                 } else {
7087                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7088                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7089                         sp->mac_control.stats_info->sw_stat.link_down_time = 
7090                                 jiffies - sp->start_time;
7091                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7092                         netif_carrier_on(dev);
7093                 }
7094         }
7095         sp->last_link_state = link;
7096         sp->start_time = jiffies;
7097 }
7098
7099 /**
7100  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7101  *  @sp : private member of the device structure, which is a pointer to the
7102  *  s2io_nic structure.
7103  *  Description:
7104  *  This function initializes a few of the PCI and PCI-X configuration registers
7105  *  with recommended values.
7106  *  Return value:
7107  *  void
7108  */
7109
static void s2io_init_pci(struct s2io_nic * sp)
{
        u16 pci_cmd = 0, pcix_cmd = 0;

        /* Enable Data Parity Error Recovery in PCI-X command register. */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                             &(pcix_cmd));
        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                              (pcix_cmd | 1));
        /* Read back to post the write (value itself is discarded). */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                             &(pcix_cmd));

        /* Set the PErr Response bit in PCI command register. */
        pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(sp->pdev, PCI_COMMAND,
                              (pci_cmd | PCI_COMMAND_PARITY));
        /* Read back to post the write (value itself is discarded). */
        pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}
7128
7129 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7130 {
7131         if ( tx_fifo_num > 8) {
7132                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7133                          "supported\n");
7134                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7135                 tx_fifo_num = 8;
7136         }
7137         if ( rx_ring_num > 8) {
7138                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7139                          "supported\n");
7140                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7141                 rx_ring_num = 8;
7142         }
7143         if (*dev_intr_type != INTA)
7144                 napi = 0;
7145
7146 #ifndef CONFIG_PCI_MSI
7147         if (*dev_intr_type != INTA) {
7148                 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
7149                           "MSI/MSI-X. Defaulting to INTA\n");
7150                 *dev_intr_type = INTA;
7151         }
7152 #else
7153         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7154                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7155                           "Defaulting to INTA\n");
7156                 *dev_intr_type = INTA;
7157         }
7158 #endif
7159         if ((*dev_intr_type == MSI_X) &&
7160                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7161                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7162                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7163                                         "Defaulting to INTA\n");
7164                 *dev_intr_type = INTA;
7165         }
7166
7167         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7168                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7169                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7170                 rx_ring_mode = 1;
7171         }
7172         return SUCCESS;
7173 }
7174
/**
 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
 * or Traffic class respectively.
 * @nic: device private variable
 * Description: The function configures the receive steering to
 * desired receive ring.
 * Return Value:  SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
7184 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7185 {
7186         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7187         register u64 val64 = 0;
7188
7189         if (ds_codepoint > 63)
7190                 return FAILURE;
7191
7192         val64 = RTS_DS_MEM_DATA(ring);
7193         writeq(val64, &bar0->rts_ds_mem_data);
7194
7195         val64 = RTS_DS_MEM_CTRL_WE |
7196                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7197                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7198
7199         writeq(val64, &bar0->rts_ds_mem_ctrl);
7200
7201         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7202                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7203                                 S2IO_BIT_RESET);
7204 }
7205
/**
 *  s2io_init_nic - Initialization of the adapter .
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
 *  control register is initialized to enable read and write into the I/O
 *  registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */
7219
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
        struct s2io_nic *sp;
        struct net_device *dev;
        int i, j, ret;
        int dma_flag = FALSE;
        u32 mac_up, mac_down;
        u64 val64 = 0, tmp64 = 0;
        struct XENA_dev_config __iomem *bar0 = NULL;
        u16 subid;
        struct mac_info *mac_control;
        struct config_param *config;
        int mode;
        u8 dev_intr_type = intr_type;

        /* Sanitize module parameters (may downgrade the interrupt type). */
        if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
                return ret;

        if ((ret = pci_enable_device(pdev))) {
                DBG_PRINT(ERR_DBG,
                          "s2io_init_nic: pci_enable_device failed\n");
                return ret;
        }

        /* Prefer 64-bit DMA; fall back to 32-bit, otherwise give up. */
        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
                dma_flag = TRUE;
                if (pci_set_consistent_dma_mask
                    (pdev, DMA_64BIT_MASK)) {
                        DBG_PRINT(ERR_DBG,
                                  "Unable to obtain 64bit DMA for \
                                        consistent allocations\n");
                        pci_disable_device(pdev);
                        return -ENOMEM;
                }
        } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
                DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
        } else {
                pci_disable_device(pdev);
                return -ENOMEM;
        }
        if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
                DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
                pci_disable_device(pdev);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(struct s2io_nic));
        if (dev == NULL) {
                DBG_PRINT(ERR_DBG, "Device allocation failed\n");
                pci_disable_device(pdev);
                pci_release_regions(pdev);
                return -ENODEV;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, dev);
        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /*  Private member variable initialized to s2io NIC structure */
        sp = dev->priv;
        memset(sp, 0, sizeof(struct s2io_nic));
        sp->dev = dev;
        sp->pdev = pdev;
        sp->high_dma_flag = dma_flag;
        sp->device_enabled_once = FALSE;
        if (rx_ring_mode == 1)
                sp->rxd_mode = RXD_MODE_1;
        if (rx_ring_mode == 2)
                sp->rxd_mode = RXD_MODE_3B;

        sp->intr_type = dev_intr_type;

        /* Herc (Xframe II) is identified by its PCI device id. */
        if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
                (pdev->device == PCI_DEVICE_ID_HERC_UNI))
                sp->device_type = XFRAME_II_DEVICE;
        else
                sp->device_type = XFRAME_I_DEVICE;

        sp->lro = lro;

        /* Initialize some PCI/PCI-X fields of the NIC. */
        s2io_init_pci(sp);

        /*
         * Setting the device configuration parameters.
         * Most of these parameters can be specified by the user during
         * module insertion as they are module loadable parameters. If
         * these parameters are not specified during load time, they
         * are initialized with default values.
         */
        mac_control = &sp->mac_control;
        config = &sp->config;

        /* Tx side parameters. */
        config->tx_fifo_num = tx_fifo_num;
        for (i = 0; i < MAX_TX_FIFOS; i++) {
                config->tx_cfg[i].fifo_len = tx_fifo_len[i];
                config->tx_cfg[i].fifo_priority = i;
        }

        /* mapping the QoS priority to the configured fifos */
        for (i = 0; i < MAX_TX_FIFOS; i++)
                config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

        /* Short fifos cannot use utilization-based Tx interrupts. */
        config->tx_intr_type = TXD_INT_TYPE_UTILZ;
        for (i = 0; i < config->tx_fifo_num; i++) {
                config->tx_cfg[i].f_no_snoop =
                    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
                if (config->tx_cfg[i].fifo_len < 65) {
                        config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
                        break;
                }
        }
        /* + 2 because one Txd for skb->data and one Txd for UFO */
        config->max_txds = MAX_SKB_FRAGS + 2;

        /* Rx side parameters. */
        config->rx_ring_num = rx_ring_num;
        for (i = 0; i < MAX_RX_RINGS; i++) {
                config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
                    (rxd_count[sp->rxd_mode] + 1);
                config->rx_cfg[i].ring_priority = i;
        }

        for (i = 0; i < rx_ring_num; i++) {
                config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
                config->rx_cfg[i].f_no_snoop =
                    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
        }

        /*  Setting Mac Control parameters */
        mac_control->rmac_pause_time = rmac_pause_time;
        mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
        mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


        /* Initialize Ring buffer parameters. */
        for (i = 0; i < config->rx_ring_num; i++)
                atomic_set(&sp->rx_bufs_left[i], 0);

        /* Initialize the number of ISRs currently running */
        atomic_set(&sp->isr_cnt, 0);

        /*  initialize the shared memory used by the NIC and the host */
        if (init_shared_mem(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
                          dev->name);
                ret = -ENOMEM;
                goto mem_alloc_failed;
        }

        /* BAR0: device config registers; BAR2: Tx FIFO doorbells. */
        sp->bar0 = ioremap(pci_resource_start(pdev, 0),
                                     pci_resource_len(pdev, 0));
        if (!sp->bar0) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar0_remap_failed;
        }

        sp->bar1 = ioremap(pci_resource_start(pdev, 2),
                                     pci_resource_len(pdev, 2));
        if (!sp->bar1) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar1_remap_failed;
        }

        dev->irq = pdev->irq;
        dev->base_addr = (unsigned long) sp->bar0;

        /* Initializing the BAR1 address as the start of the FIFO pointer. */
        /* Each FIFO occupies a fixed 128KB window in BAR1. */
        for (j = 0; j < MAX_TX_FIFOS; j++) {
                mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
                    (sp->bar1 + (j * 0x00020000));
        }

        /*  Driver entry points */
        dev->open = &s2io_open;
        dev->stop = &s2io_close;
        dev->hard_start_xmit = &s2io_xmit;
        dev->get_stats = &s2io_get_stats;
        dev->set_multicast_list = &s2io_set_multicast;
        dev->do_ioctl = &s2io_ioctl;
        dev->change_mtu = &s2io_change_mtu;
        SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->vlan_rx_register = s2io_vlan_rx_register;

        /*
         * will use eth_mac_addr() for  dev->set_mac_address
         * mac address will be set every time dev->open() is called
         */
        netif_napi_add(dev, &sp->napi, s2io_poll, 32);

#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = s2io_netpoll;
#endif

        dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
        if (sp->high_dma_flag == TRUE)
                dev->features |= NETIF_F_HIGHDMA;
        dev->features |= NETIF_F_TSO;
        dev->features |= NETIF_F_TSO6;
        /* UFO is only supported on Xframe II, and only when enabled. */
        if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
                dev->features |= NETIF_F_UFO;
                dev->features |= NETIF_F_HW_CSUM;
        }

        dev->tx_timeout = &s2io_tx_watchdog;
        dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
        INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
        INIT_WORK(&sp->set_link_task, s2io_set_link);

        pci_save_state(sp->pdev);

        /* Setting swapper control on the NIC, for proper reset operation */
        if (s2io_set_swapper(sp)) {
                DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
                          dev->name);
                ret = -EAGAIN;
                goto set_swap_failed;
        }

        /* Verify if the Herc works on the slot its placed into */
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_verify_pci_mode(sp);
                if (mode < 0) {
                        DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
                        DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
                        ret = -EBADSLT;
                        goto set_swap_failed;
                }
        }

        /* Not needed for Herc */
        if (sp->device_type & XFRAME_I_DEVICE) {
                /*
                 * Fix for all "FFs" MAC address problems observed on
                 * Alpha platforms
                 */
                fix_mac_address(sp);
                s2io_reset(sp);
        }

        /*
         * MAC address initialization.
         * For now only one mac address will be read and used.
         */
        bar0 = sp->bar0;
        val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
            RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
        writeq(val64, &bar0->rmac_addr_cmd_mem);
        wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
        tmp64 = readq(&bar0->rmac_addr_data0_mem);
        mac_down = (u32) tmp64;
        mac_up = (u32) (tmp64 >> 32);

        /* Unpack the 6 MAC octets from the two 32-bit register halves. */
        sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
        sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
        sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
        sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
        sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
        sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

        /*  Set the factory defined MAC address initially   */
        dev->addr_len = ETH_ALEN;
        memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

         /* Store the values of the MSIX table in the s2io_nic structure */
        store_xmsi_data(sp);
        /* reset Nic and bring it to known state */
        s2io_reset(sp);

        /*
         * Initialize the tasklet status and link state flags
         * and the card state parameter
         */
        atomic_set(&(sp->card_state), 0);
        sp->tasklet_status = 0;
        sp->link_state = 0;

        /* Initialize spinlocks */
        spin_lock_init(&sp->tx_lock);

        /* put_lock is only used on the non-NAPI receive path. */
        if (!napi)
                spin_lock_init(&sp->put_lock);
        spin_lock_init(&sp->rx_lock);

        /*
         * SXE-002: Configure link and activity LED to init state
         * on driver load.
         */
        subid = sp->pdev->subsystem_device;
        if ((subid & 0xFF) >= 0x07) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                /* 0x2700 is the LED control offset per the SXE-002 errata
                 * workaround above; magic value programs the init state. */
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *) bar0 + 0x2700);
                val64 = readq(&bar0->gpio_control);
        }

        sp->rx_csum = 1;        /* Rx chksum verify enabled by default */

        if (register_netdev(dev)) {
                DBG_PRINT(ERR_DBG, "Device registration failed\n");
                ret = -ENODEV;
                goto register_failed;
        }
        s2io_vpd_read(sp);
        DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
        DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
                  sp->product_name, pdev->revision);
        DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
                  s2io_driver_version);
        DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
                          "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
                          sp->def_mac_addr[0].mac_addr[0],
                          sp->def_mac_addr[0].mac_addr[1],
                          sp->def_mac_addr[0].mac_addr[2],
                          sp->def_mac_addr[0].mac_addr[3],
                          sp->def_mac_addr[0].mac_addr[4],
                          sp->def_mac_addr[0].mac_addr[5]);
        DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_print_pci_mode(sp);
                if (mode < 0) {
                        DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
                        ret = -EBADSLT;
                        unregister_netdev(dev);
                        goto set_swap_failed;
                }
        }
        switch(sp->rxd_mode) {
                case RXD_MODE_1:
                    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
                                                dev->name);
                    break;
                case RXD_MODE_3B:
                    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
                                                dev->name);
                    break;
        }

        if (napi)
                DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
        switch(sp->intr_type) {
                case INTA:
                    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
                    break;
                case MSI_X:
                    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
                    break;
        }
        if (sp->lro)
                DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
                          dev->name);
        if (ufo)
                DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
                                        " enabled\n", dev->name);
        /* Initialize device name */
        sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

        /* Initialize bimodal Interrupts */
        sp->config.bimodal = bimodal;
        if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
                sp->config.bimodal = 0;
                DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
                        dev->name);
        }

        /*
         * Make Link state as off at this point, when the Link change
         * interrupt comes the state will be automatically changed to
         * the right state.
         */
        netif_carrier_off(dev);

        return 0;

        /* Error unwind: each label releases everything acquired before it. */
      register_failed:
      set_swap_failed:
        iounmap(sp->bar1);
      bar1_remap_failed:
        iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
        free_shared_mem(sp);
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        return ret;
}
7621
7622 /**
7623  * s2io_rem_nic - Free the PCI device
7624  * @pdev: structure containing the PCI related information of the device.
7625  * Description: This function is called by the Pci subsystem to release a
7626  * PCI device and free up all resource held up by the device. This could
7627  * be in response to a Hot plug event or when the driver is to be removed
7628  * from memory.
7629  */
7630
7631 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7632 {
7633         struct net_device *dev =
7634             (struct net_device *) pci_get_drvdata(pdev);
7635         struct s2io_nic *sp;
7636
7637         if (dev == NULL) {
7638                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7639                 return;
7640         }
7641
7642         flush_scheduled_work();
7643
7644         sp = dev->priv;
7645         unregister_netdev(dev);
7646
7647         free_shared_mem(sp);
7648         iounmap(sp->bar0);
7649         iounmap(sp->bar1);
7650         pci_release_regions(pdev);
7651         pci_set_drvdata(pdev, NULL);
7652         free_netdev(dev);
7653         pci_disable_device(pdev);
7654 }
7655
7656 /**
7657  * s2io_starter - Entry point for the driver
7658  * Description: This function is the entry point for the driver. It verifies
7659  * the module loadable parameters and initializes PCI configuration space.
7660  */
7661
7662 int __init s2io_starter(void)
7663 {
7664         return pci_register_driver(&s2io_driver);
7665 }
7666
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */
7671
7672 static __exit void s2io_closer(void)
7673 {
7674         pci_unregister_driver(&s2io_driver);
7675         DBG_PRINT(INIT_DBG, "cleanup done\n");
7676 }
7677
7678 module_init(s2io_starter);
7679 module_exit(s2io_closer);
7680
7681 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7682                 struct tcphdr **tcp, struct RxD_t *rxdp)
7683 {
7684         int ip_off;
7685         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7686
7687         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7688                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7689                           __FUNCTION__);
7690                 return -1;
7691         }
7692
7693         /* TODO:
7694          * By default the VLAN field in the MAC is stripped by the card, if this
7695          * feature is turned off in rx_pa_cfg register, then the ip_off field
7696          * has to be shifted by a further 2 bytes
7697          */
7698         switch (l2_type) {
7699                 case 0: /* DIX type */
7700                 case 4: /* DIX type with VLAN */
7701                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7702                         break;
7703                 /* LLC, SNAP etc are considered non-mergeable */
7704                 default:
7705                         return -1;
7706         }
7707
7708         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7709         ip_len = (u8)((*ip)->ihl);
7710         ip_len <<= 2;
7711         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7712
7713         return 0;
7714 }
7715
7716 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7717                                   struct tcphdr *tcp)
7718 {
7719         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7720         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7721            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7722                 return -1;
7723         return 0;
7724 }
7725
7726 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7727 {
7728         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7729 }
7730
7731 static void initiate_new_session(struct lro *lro, u8 *l2h,
7732                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7733 {
7734         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7735         lro->l2h = l2h;
7736         lro->iph = ip;
7737         lro->tcph = tcp;
7738         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7739         lro->tcp_ack = ntohl(tcp->ack_seq);
7740         lro->sg_num = 1;
7741         lro->total_len = ntohs(ip->tot_len);
7742         lro->frags_len = 0;
7743         /*
7744          * check if we saw TCP timestamp. Other consistency checks have
7745          * already been done.
7746          */
7747         if (tcp->doff == 8) {
7748                 u32 *ptr;
7749                 ptr = (u32 *)(tcp+1);
7750                 lro->saw_ts = 1;
7751                 lro->cur_tsval = *(ptr+1);
7752                 lro->cur_tsecr = *(ptr+2);
7753         }
7754         lro->in_use = 1;
7755 }
7756
7757 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7758 {
7759         struct iphdr *ip = lro->iph;
7760         struct tcphdr *tcp = lro->tcph;
7761         __sum16 nchk;
7762         struct stat_block *statinfo = sp->mac_control.stats_info;
7763         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7764
7765         /* Update L3 header */
7766         ip->tot_len = htons(lro->total_len);
7767         ip->check = 0;
7768         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7769         ip->check = nchk;
7770
7771         /* Update L4 header */
7772         tcp->ack_seq = lro->tcp_ack;
7773         tcp->window = lro->window;
7774
7775         /* Update tsecr field if this session has timestamps enabled */
7776         if (lro->saw_ts) {
7777                 u32 *ptr = (u32 *)(tcp + 1);
7778                 *(ptr+2) = lro->cur_tsecr;
7779         }
7780
7781         /* Update counters required for calculation of
7782          * average no. of packets aggregated.
7783          */
7784         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7785         statinfo->sw_stat.num_aggregations++;
7786 }
7787
7788 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7789                 struct tcphdr *tcp, u32 l4_pyld)
7790 {
7791         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7792         lro->total_len += l4_pyld;
7793         lro->frags_len += l4_pyld;
7794         lro->tcp_next_seq += l4_pyld;
7795         lro->sg_num++;
7796
7797         /* Update ack seq no. and window ad(from this pkt) in LRO object */
7798         lro->tcp_ack = tcp->ack_seq;
7799         lro->window = tcp->window;
7800
7801         if (lro->saw_ts) {
7802                 u32 *ptr;
7803                 /* Update tsecr and tsval from this packet */
7804                 ptr = (u32 *) (tcp + 1);
7805                 lro->cur_tsval = *(ptr + 1);
7806                 lro->cur_tsecr = *(ptr + 2);
7807         }
7808 }
7809
7810 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7811                                     struct tcphdr *tcp, u32 tcp_pyld_len)
7812 {
7813         u8 *ptr;
7814
7815         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7816
7817         if (!tcp_pyld_len) {
7818                 /* Runt frame or a pure ack */
7819                 return -1;
7820         }
7821
7822         if (ip->ihl != 5) /* IP has options */
7823                 return -1;
7824
7825         /* If we see CE codepoint in IP header, packet is not mergeable */
7826         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7827                 return -1;
7828
7829         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7830         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7831                                     tcp->ece || tcp->cwr || !tcp->ack) {
7832                 /*
7833                  * Currently recognize only the ack control word and
7834                  * any other control field being set would result in
7835                  * flushing the LRO session
7836                  */
7837                 return -1;
7838         }
7839
7840         /*
7841          * Allow only one TCP timestamp option. Don't aggregate if
7842          * any other options are detected.
7843          */
7844         if (tcp->doff != 5 && tcp->doff != 8)
7845                 return -1;
7846
7847         if (tcp->doff == 8) {
7848                 ptr = (u8 *)(tcp + 1);
7849                 while (*ptr == TCPOPT_NOP)
7850                         ptr++;
7851                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7852                         return -1;
7853
7854                 /* Ensure timestamp value increases monotonically */
7855                 if (l_lro)
7856                         if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7857                                 return -1;
7858
7859                 /* timestamp echo reply should be non-zero */
7860                 if (*((u32 *)(ptr+6)) == 0)
7861                         return -1;
7862         }
7863
7864         return 0;
7865 }
7866
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
                      struct RxD_t *rxdp, struct s2io_nic *sp)
{
        struct iphdr *ip;
        struct tcphdr *tcph;
        /*
         * Return-code protocol (consumed by the caller):
         *   0 = no free session slot; packet sent up unmerged
         *   1 = aggregated into an existing session
         *   2 = flush: headers updated, session must be pushed up
         *   3 = new session begun with this packet
         *   4 = aggregated and session hit max-aggregation; flush it
         *   5 = not aggregatable at L3/L4; send packet up as-is
         *  <0 = L2/protocol check failed (from check_L2_lro_capable)
         */
        int ret = 0, i;

        /* Locate IP/TCP headers; bail out for non-TCP / non-DIX frames. */
        if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                                         rxdp))) {
                DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
                          ip->saddr, ip->daddr);
        } else {
                return ret;
        }

        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        /* Pass 1: look for an in-use session matching this 4-tuple. */
        for (i=0; i<MAX_LRO_SESSIONS; i++) {
                struct lro *l_lro = &sp->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
                        /* Sock pair matched */
                        *lro = l_lro;

                        /* Out-of-sequence segment: flush the session. */
                        if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                                DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
                                          "0x%x, actual 0x%x\n", __FUNCTION__,
                                          (*lro)->tcp_next_seq,
                                          ntohl(tcph->seq));

                                sp->mac_control.stats_info->
                                   sw_stat.outof_sequence_pkts++;
                                ret = 2;
                                break;
                        }

                        if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
                                ret = 1; /* Aggregate */
                        else
                                ret = 2; /* Flush both */
                        break;
                }
        }

        if (ret == 0) {
                /* Before searching for available LRO objects,
                 * check if the pkt is L3/L4 aggregatable. If not
                 * don't create new LRO session. Just send this
                 * packet up.
                 */
                if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
                        return 5;
                }

                /* Pass 2: claim the first free session slot. */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *l_lro = &sp->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
                                break;
                        }
                }
        }

        if (ret == 0) { /* sessions exceeded */
                DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
                          __FUNCTION__);
                *lro = NULL;
                return ret;
        }

        /* Act on the decision made above. */
        switch (ret) {
                case 3:
                        initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
                        break;
                case 2:
                        update_L3L4_header(sp, *lro);
                        break;
                case 1:
                        aggregate_new_rx(*lro, ip, tcph, *tcp_len);
                        if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
                                update_L3L4_header(sp, *lro);
                                ret = 4; /* Flush the LRO */
                        }
                        break;
                default:
                        DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
                                __FUNCTION__);
                        break;
        }

        return ret;
}
7962
7963 static void clear_lro_session(struct lro *lro)
7964 {
7965         static u16 lro_struct_size = sizeof(struct lro);
7966
7967         memset(lro, 0, lro_struct_size);
7968 }
7969
7970 static void queue_rx_frame(struct sk_buff *skb)
7971 {
7972         struct net_device *dev = skb->dev;
7973
7974         skb->protocol = eth_type_trans(skb, dev);
7975         if (napi)
7976                 netif_receive_skb(skb);
7977         else
7978                 netif_rx(skb);
7979 }
7980
7981 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7982                            struct sk_buff *skb,
7983                            u32 tcp_len)
7984 {
7985         struct sk_buff *first = lro->parent;
7986
7987         first->len += tcp_len;
7988         first->data_len = lro->frags_len;
7989         skb_pull(skb, (skb->len - tcp_len));
7990         if (skb_shinfo(first)->frag_list)
7991                 lro->last_frag->next = skb;
7992         else
7993                 skb_shinfo(first)->frag_list = skb;
7994         first->truesize += skb->truesize;
7995         lro->last_frag = skb;
7996         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7997         return;
7998 }
7999
8000 /**
8001  * s2io_io_error_detected - called when PCI error is detected
8002  * @pdev: Pointer to PCI device
8003  * @state: The current pci connection state
8004  *
8005  * This function is called after a PCI bus error affecting
8006  * this device has been detected.
8007  */
8008 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8009                                                pci_channel_state_t state)
8010 {
8011         struct net_device *netdev = pci_get_drvdata(pdev);
8012         struct s2io_nic *sp = netdev->priv;
8013
8014         netif_device_detach(netdev);
8015
8016         if (netif_running(netdev)) {
8017                 /* Bring down the card, while avoiding PCI I/O */
8018                 do_s2io_card_down(sp, 0);
8019         }
8020         pci_disable_device(pdev);
8021
8022         return PCI_ERS_RESULT_NEED_RESET;
8023 }
8024
8025 /**
8026  * s2io_io_slot_reset - called after the pci bus has been reset.
8027  * @pdev: Pointer to PCI device
8028  *
8029  * Restart the card from scratch, as if from a cold-boot.
8030  * At this point, the card has exprienced a hard reset,
8031  * followed by fixups by BIOS, and has its config space
8032  * set up identically to what it was at cold boot.
8033  */
8034 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8035 {
8036         struct net_device *netdev = pci_get_drvdata(pdev);
8037         struct s2io_nic *sp = netdev->priv;
8038
8039         if (pci_enable_device(pdev)) {
8040                 printk(KERN_ERR "s2io: "
8041                        "Cannot re-enable PCI device after reset.\n");
8042                 return PCI_ERS_RESULT_DISCONNECT;
8043         }
8044
8045         pci_set_master(pdev);
8046         s2io_reset(sp);
8047
8048         return PCI_ERS_RESULT_RECOVERED;
8049 }
8050
8051 /**
8052  * s2io_io_resume - called when traffic can start flowing again.
8053  * @pdev: Pointer to PCI device
8054  *
8055  * This callback is called when the error recovery driver tells
8056  * us that its OK to resume normal operation.
8057  */
8058 static void s2io_io_resume(struct pci_dev *pdev)
8059 {
8060         struct net_device *netdev = pci_get_drvdata(pdev);
8061         struct s2io_nic *sp = netdev->priv;
8062
8063         if (netif_running(netdev)) {
8064                 if (s2io_card_up(sp)) {
8065                         printk(KERN_ERR "s2io: "
8066                                "Can't bring device back up after reset.\n");
8067                         return;
8068                 }
8069
8070                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8071                         s2io_card_down(sp);
8072                         printk(KERN_ERR "s2io: "
8073                                "Can't resetore mac addr after reset.\n");
8074                         return;
8075                 }
8076         }
8077
8078         netif_device_attach(netdev);
8079         netif_wake_queue(netdev);
8080 }