1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
74 #include <linux/tcp.h>
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
80 #include <asm/div64.h>
85 #include "s2io-regs.h"
87 #define DRV_VERSION "2.0.26.1"
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
/* Per-descriptor size in bytes and descriptors per Rx block, both indexed
 * by the device's rxd_mode (see rxd_size[nic->rxd_mode] /
 * rxd_count[sp->rxd_mode] usage below).  Index 0 presumably corresponds to
 * 1-buffer mode and index 1 to 3-buffer mode -- TODO confirm mapping. */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107 * Cards with following subsystem_id have a link state indication
108 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109 * macro below identifies these cards given the subsystem_id.
/* Evaluates to 1 when the Xframe-I card identified by 'subid' is one of
 * the known faulty-link-indicator parts, 0 otherwise.  Both macro
 * arguments and the whole expansion are parenthesized so the macro is
 * safe inside larger expressions and with non-trivial arguments. */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/* True when neither the remote nor the local RMAC fault bit is set in
 * the adapter-status word.  The argument is parenthesized so expressions
 * (not just plain variables) can be passed safely. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
123 struct mac_info *mac_control;
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
133 /* Ethtool related variables and Macros. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
144 {"tmac_data_octets"},
148 {"tmac_pause_ctrl_frms"},
152 {"tmac_any_err_frms"},
153 {"tmac_ttl_less_fb_octets"},
154 {"tmac_vld_ip_octets"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
168 {"rmac_out_rng_len_err_frms"},
170 {"rmac_pause_ctrl_frms"},
171 {"rmac_unsup_ctrl_frms"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
175 {"rmac_discarded_frms"},
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
182 {"rmac_jabber_frms"},
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
196 {"rmac_err_drp_udp"},
197 {"rmac_xgmii_err_sym"},
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
217 {"rmac_accepted_ip"},
221 {"new_rd_req_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
226 {"new_wr_req_rtry_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240 {"rmac_ttl_1519_4095_frms"},
241 {"rmac_ttl_4096_8191_frms"},
242 {"rmac_ttl_8192_max_frms"},
243 {"rmac_ttl_gt_max_frms"},
244 {"rmac_osized_alt_frms"},
245 {"rmac_jabber_alt_frms"},
246 {"rmac_gt_max_alt_frms"},
248 {"rmac_len_discard"},
249 {"rmac_fcs_discard"},
252 {"rmac_red_discard"},
253 {"rmac_rts_discard"},
254 {"rmac_ingm_full_discard"},
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
267 ("alarm_transceiver_temp_high"),
268 ("alarm_transceiver_temp_low"),
269 ("alarm_laser_bias_current_high"),
270 ("alarm_laser_bias_current_low"),
271 ("alarm_laser_output_power_high"),
272 ("alarm_laser_output_power_low"),
273 ("warn_transceiver_temp_high"),
274 ("warn_transceiver_temp_low"),
275 ("warn_laser_bias_current_high"),
276 ("warn_laser_bias_current_low"),
277 ("warn_laser_output_power_high"),
278 ("warn_laser_output_power_low"),
279 ("lro_aggregated_pkts"),
280 ("lro_flush_both_count"),
281 ("lro_out_of_sequence_pkts"),
282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"),
284 ("mem_alloc_fail_cnt"),
285 ("pci_map_fail_cnt"),
286 ("watchdog_timer_cnt"),
293 ("tx_tcode_buf_abort_cnt"),
294 ("tx_tcode_desc_abort_cnt"),
295 ("tx_tcode_parity_err_cnt"),
296 ("tx_tcode_link_loss_cnt"),
297 ("tx_tcode_list_proc_err_cnt"),
298 ("rx_tcode_parity_err_cnt"),
299 ("rx_tcode_abort_cnt"),
300 ("rx_tcode_parity_abort_cnt"),
301 ("rx_tcode_rda_fail_cnt"),
302 ("rx_tcode_unkn_prot_cnt"),
303 ("rx_tcode_fcs_err_cnt"),
304 ("rx_tcode_buf_size_err_cnt"),
305 ("rx_tcode_rxd_corrupt_cnt"),
306 ("rx_tcode_unkn_err_cnt")
/* Entry counts for the ethtool string tables.  Each table is declared as
 * [][ETH_GSTRING_LEN], so dividing its total size by ETH_GSTRING_LEN
 * yields the number of entries.  Every definition is fully parenthesized
 * so the macros behave correctly inside larger expressions (the original
 * S2IO_STRINGS_LEN, for instance, expanded to a bare "a / b * c"). */
#define S2IO_XENA_STAT_LEN	(sizeof(ethtool_xena_stats_keys) / \
					ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN	(sizeof(ethtool_enhanced_stats_keys) / \
					ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN	(sizeof(ethtool_driver_stats_keys) / \
					ETH_GSTRING_LEN)

#define XFRAME_I_STAT_LEN	(S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN	(XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN	(XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN	(XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN		(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * Initialize 'timer' to invoke 'handle(arg)' and arm it to expire 'exp'
 * jiffies from now.  Wrapped in do { } while (0) so the multi-statement
 * body stays a single statement when used as "S2IO_TIMER_CONF(...);"
 * under an unbraced if/else (the original expansion would split apart
 * there and only the first statement would be conditional).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
330 static void s2io_vlan_rx_register(struct net_device *dev,
331 struct vlan_group *grp)
333 struct s2io_nic *nic = dev->priv;
336 spin_lock_irqsave(&nic->tx_lock, flags);
338 spin_unlock_irqrestore(&nic->tx_lock, flags);
341 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
342 static int vlan_strip_flag;
345 * Constants to be programmed into the Xena's registers, to configure
350 static const u64 herc_act_dtx_cfg[] = {
352 0x8000051536750000ULL, 0x80000515367500E0ULL,
354 0x8000051536750004ULL, 0x80000515367500E4ULL,
356 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
358 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
360 0x801205150D440000ULL, 0x801205150D4400E0ULL,
362 0x801205150D440004ULL, 0x801205150D4400E4ULL,
364 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
366 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
371 static const u64 xena_dtx_cfg[] = {
373 0x8000051500000000ULL, 0x80000515000000E0ULL,
375 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
377 0x8001051500000000ULL, 0x80010515000000E0ULL,
379 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
381 0x8002051500000000ULL, 0x80020515000000E0ULL,
383 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
388 * Constants for Fixing the MacAddress problem seen mostly on
391 static const u64 fix_mac[] = {
392 0x0060000000000000ULL, 0x0060600000000000ULL,
393 0x0040600000000000ULL, 0x0000600000000000ULL,
394 0x0020600000000000ULL, 0x0060600000000000ULL,
395 0x0020600000000000ULL, 0x0060600000000000ULL,
396 0x0020600000000000ULL, 0x0060600000000000ULL,
397 0x0020600000000000ULL, 0x0060600000000000ULL,
398 0x0020600000000000ULL, 0x0060600000000000ULL,
399 0x0020600000000000ULL, 0x0060600000000000ULL,
400 0x0020600000000000ULL, 0x0060600000000000ULL,
401 0x0020600000000000ULL, 0x0060600000000000ULL,
402 0x0020600000000000ULL, 0x0060600000000000ULL,
403 0x0020600000000000ULL, 0x0060600000000000ULL,
404 0x0020600000000000ULL, 0x0000600000000000ULL,
405 0x0040600000000000ULL, 0x0060600000000000ULL,
409 MODULE_LICENSE("GPL");
410 MODULE_VERSION(DRV_VERSION);
413 /* Module Loadable parameters. */
414 S2IO_PARM_INT(tx_fifo_num, 1);
415 S2IO_PARM_INT(rx_ring_num, 1);
418 S2IO_PARM_INT(rx_ring_mode, 1);
419 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
420 S2IO_PARM_INT(rmac_pause_time, 0x100);
421 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
422 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
423 S2IO_PARM_INT(shared_splits, 0);
424 S2IO_PARM_INT(tmac_util_period, 5);
425 S2IO_PARM_INT(rmac_util_period, 5);
426 S2IO_PARM_INT(bimodal, 0);
427 S2IO_PARM_INT(l3l4hdr_size, 128);
428 /* Frequency of Rx desc syncs expressed as power of 2 */
429 S2IO_PARM_INT(rxsync_frequency, 3);
430 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
431 S2IO_PARM_INT(intr_type, 2);
432 /* Large receive offload feature */
433 S2IO_PARM_INT(lro, 0);
434 /* Max pkts to be aggregated by LRO at one time. If not specified,
435 * aggregation happens until we hit max IP pkt size(64K)
437 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
438 S2IO_PARM_INT(indicate_max_pkts, 0);
440 S2IO_PARM_INT(napi, 1);
441 S2IO_PARM_INT(ufo, 0);
442 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
/* Per-FIFO Tx descriptor counts: FIFO 0 defaults to DEFAULT_FIFO_0_LEN,
 * the remaining FIFOs to DEFAULT_FIFO_1_7_LEN. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Rx block count per ring; every ring defaults to SMALL_BLK_CNT. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring frame-length values; 0 presumably keeps the hardware
 * default -- TODO confirm against the RTS register programming. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

/* Expose the three arrays above as module load-time parameters
 * (permission 0: no sysfs entry is created). */
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
457 * This table lists all the devices that this driver supports.
459 static struct pci_device_id s2io_tbl[] __devinitdata = {
460 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
461 PCI_ANY_ID, PCI_ANY_ID},
462 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
463 PCI_ANY_ID, PCI_ANY_ID},
464 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
465 PCI_ANY_ID, PCI_ANY_ID},
466 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
467 PCI_ANY_ID, PCI_ANY_ID},
471 MODULE_DEVICE_TABLE(pci, s2io_tbl);
473 static struct pci_error_handlers s2io_err_handler = {
474 .error_detected = s2io_io_error_detected,
475 .slot_reset = s2io_io_slot_reset,
476 .resume = s2io_io_resume,
479 static struct pci_driver s2io_driver = {
481 .id_table = s2io_tbl,
482 .probe = s2io_init_nic,
483 .remove = __devexit_p(s2io_rem_nic),
484 .err_handler = &s2io_err_handler,
487 /* A simplifier macro used both by init and free shared_mem Fns(). */
488 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
491 * init_shared_mem - Allocation and Initialization of Memory
492 * @nic: Device private variable.
493 * Description: The function allocates all the memory areas shared
494 * between the NIC and the driver. This includes Tx descriptors,
495 * Rx descriptors and the statistics block.
498 static int init_shared_mem(struct s2io_nic *nic)
501 void *tmp_v_addr, *tmp_v_addr_next;
502 dma_addr_t tmp_p_addr, tmp_p_addr_next;
503 struct RxD_block *pre_rxd_blk = NULL;
505 int lst_size, lst_per_page;
506 struct net_device *dev = nic->dev;
510 struct mac_info *mac_control;
511 struct config_param *config;
512 unsigned long long mem_allocated = 0;
514 mac_control = &nic->mac_control;
515 config = &nic->config;
518 /* Allocation and initialization of TXDLs in FIFOs */
520 for (i = 0; i < config->tx_fifo_num; i++) {
521 size += config->tx_cfg[i].fifo_len;
523 if (size > MAX_AVAILABLE_TXDS) {
524 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
525 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
529 lst_size = (sizeof(struct TxD) * config->max_txds);
530 lst_per_page = PAGE_SIZE / lst_size;
532 for (i = 0; i < config->tx_fifo_num; i++) {
533 int fifo_len = config->tx_cfg[i].fifo_len;
534 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
535 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
537 if (!mac_control->fifos[i].list_info) {
539 "Malloc failed for list_info\n");
542 mem_allocated += list_holder_size;
543 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
545 for (i = 0; i < config->tx_fifo_num; i++) {
546 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
548 mac_control->fifos[i].tx_curr_put_info.offset = 0;
549 mac_control->fifos[i].tx_curr_put_info.fifo_len =
550 config->tx_cfg[i].fifo_len - 1;
551 mac_control->fifos[i].tx_curr_get_info.offset = 0;
552 mac_control->fifos[i].tx_curr_get_info.fifo_len =
553 config->tx_cfg[i].fifo_len - 1;
554 mac_control->fifos[i].fifo_no = i;
555 mac_control->fifos[i].nic = nic;
556 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
558 for (j = 0; j < page_num; j++) {
562 tmp_v = pci_alloc_consistent(nic->pdev,
566 "pci_alloc_consistent ");
567 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
570 /* If we got a zero DMA address(can happen on
571 * certain platforms like PPC), reallocate.
572 * Store virtual address of page we don't want,
576 mac_control->zerodma_virt_addr = tmp_v;
578 "%s: Zero DMA address for TxDL. ", dev->name);
580 "Virtual address %p\n", tmp_v);
581 tmp_v = pci_alloc_consistent(nic->pdev,
585 "pci_alloc_consistent ");
586 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
589 mem_allocated += PAGE_SIZE;
591 while (k < lst_per_page) {
592 int l = (j * lst_per_page) + k;
593 if (l == config->tx_cfg[i].fifo_len)
595 mac_control->fifos[i].list_info[l].list_virt_addr =
596 tmp_v + (k * lst_size);
597 mac_control->fifos[i].list_info[l].list_phy_addr =
598 tmp_p + (k * lst_size);
604 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
605 if (!nic->ufo_in_band_v)
607 mem_allocated += (size * sizeof(u64));
609 /* Allocation and initialization of RXDs in Rings */
611 for (i = 0; i < config->rx_ring_num; i++) {
612 if (config->rx_cfg[i].num_rxd %
613 (rxd_count[nic->rxd_mode] + 1)) {
614 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
615 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
617 DBG_PRINT(ERR_DBG, "RxDs per Block");
620 size += config->rx_cfg[i].num_rxd;
621 mac_control->rings[i].block_count =
622 config->rx_cfg[i].num_rxd /
623 (rxd_count[nic->rxd_mode] + 1 );
624 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
625 mac_control->rings[i].block_count;
627 if (nic->rxd_mode == RXD_MODE_1)
628 size = (size * (sizeof(struct RxD1)));
630 size = (size * (sizeof(struct RxD3)));
632 for (i = 0; i < config->rx_ring_num; i++) {
633 mac_control->rings[i].rx_curr_get_info.block_index = 0;
634 mac_control->rings[i].rx_curr_get_info.offset = 0;
635 mac_control->rings[i].rx_curr_get_info.ring_len =
636 config->rx_cfg[i].num_rxd - 1;
637 mac_control->rings[i].rx_curr_put_info.block_index = 0;
638 mac_control->rings[i].rx_curr_put_info.offset = 0;
639 mac_control->rings[i].rx_curr_put_info.ring_len =
640 config->rx_cfg[i].num_rxd - 1;
641 mac_control->rings[i].nic = nic;
642 mac_control->rings[i].ring_no = i;
644 blk_cnt = config->rx_cfg[i].num_rxd /
645 (rxd_count[nic->rxd_mode] + 1);
646 /* Allocating all the Rx blocks */
647 for (j = 0; j < blk_cnt; j++) {
648 struct rx_block_info *rx_blocks;
651 rx_blocks = &mac_control->rings[i].rx_blocks[j];
652 size = SIZE_OF_BLOCK; //size is always page size
653 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
655 if (tmp_v_addr == NULL) {
657 * In case of failure, free_shared_mem()
658 * is called, which should free any
659 * memory that was alloced till the
662 rx_blocks->block_virt_addr = tmp_v_addr;
665 mem_allocated += size;
666 memset(tmp_v_addr, 0, size);
667 rx_blocks->block_virt_addr = tmp_v_addr;
668 rx_blocks->block_dma_addr = tmp_p_addr;
669 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
670 rxd_count[nic->rxd_mode],
672 if (!rx_blocks->rxds)
675 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
676 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
677 rx_blocks->rxds[l].virt_addr =
678 rx_blocks->block_virt_addr +
679 (rxd_size[nic->rxd_mode] * l);
680 rx_blocks->rxds[l].dma_addr =
681 rx_blocks->block_dma_addr +
682 (rxd_size[nic->rxd_mode] * l);
685 /* Interlinking all Rx Blocks */
686 for (j = 0; j < blk_cnt; j++) {
688 mac_control->rings[i].rx_blocks[j].block_virt_addr;
690 mac_control->rings[i].rx_blocks[(j + 1) %
691 blk_cnt].block_virt_addr;
693 mac_control->rings[i].rx_blocks[j].block_dma_addr;
695 mac_control->rings[i].rx_blocks[(j + 1) %
696 blk_cnt].block_dma_addr;
698 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
699 pre_rxd_blk->reserved_2_pNext_RxD_block =
700 (unsigned long) tmp_v_addr_next;
701 pre_rxd_blk->pNext_RxD_Blk_physical =
702 (u64) tmp_p_addr_next;
705 if (nic->rxd_mode == RXD_MODE_3B) {
707 * Allocation of Storages for buffer addresses in 2BUFF mode
708 * and the buffers as well.
710 for (i = 0; i < config->rx_ring_num; i++) {
711 blk_cnt = config->rx_cfg[i].num_rxd /
712 (rxd_count[nic->rxd_mode]+ 1);
713 mac_control->rings[i].ba =
714 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
716 if (!mac_control->rings[i].ba)
718 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
719 for (j = 0; j < blk_cnt; j++) {
721 mac_control->rings[i].ba[j] =
722 kmalloc((sizeof(struct buffAdd) *
723 (rxd_count[nic->rxd_mode] + 1)),
725 if (!mac_control->rings[i].ba[j])
727 mem_allocated += (sizeof(struct buffAdd) * \
728 (rxd_count[nic->rxd_mode] + 1));
729 while (k != rxd_count[nic->rxd_mode]) {
730 ba = &mac_control->rings[i].ba[j][k];
732 ba->ba_0_org = (void *) kmalloc
733 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
737 (BUF0_LEN + ALIGN_SIZE);
738 tmp = (unsigned long)ba->ba_0_org;
740 tmp &= ~((unsigned long) ALIGN_SIZE);
741 ba->ba_0 = (void *) tmp;
743 ba->ba_1_org = (void *) kmalloc
744 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
748 += (BUF1_LEN + ALIGN_SIZE);
749 tmp = (unsigned long) ba->ba_1_org;
751 tmp &= ~((unsigned long) ALIGN_SIZE);
752 ba->ba_1 = (void *) tmp;
759 /* Allocation and initialization of Statistics block */
760 size = sizeof(struct stat_block);
761 mac_control->stats_mem = pci_alloc_consistent
762 (nic->pdev, size, &mac_control->stats_mem_phy);
764 if (!mac_control->stats_mem) {
766 * In case of failure, free_shared_mem() is called, which
767 * should free any memory that was alloced till the
772 mem_allocated += size;
773 mac_control->stats_mem_sz = size;
775 tmp_v_addr = mac_control->stats_mem;
776 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
777 memset(tmp_v_addr, 0, size);
778 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
779 (unsigned long long) tmp_p_addr);
780 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
785 * free_shared_mem - Free the allocated Memory
786 * @nic: Device private variable.
787 * Description: This function is to free all memory locations allocated by
788 * the init_shared_mem() function and return it to the kernel.
791 static void free_shared_mem(struct s2io_nic *nic)
793 int i, j, blk_cnt, size;
796 dma_addr_t tmp_p_addr;
797 struct mac_info *mac_control;
798 struct config_param *config;
799 int lst_size, lst_per_page;
800 struct net_device *dev;
808 mac_control = &nic->mac_control;
809 config = &nic->config;
811 lst_size = (sizeof(struct TxD) * config->max_txds);
812 lst_per_page = PAGE_SIZE / lst_size;
814 for (i = 0; i < config->tx_fifo_num; i++) {
815 ufo_size += config->tx_cfg[i].fifo_len;
816 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
818 for (j = 0; j < page_num; j++) {
819 int mem_blks = (j * lst_per_page);
820 if (!mac_control->fifos[i].list_info)
822 if (!mac_control->fifos[i].list_info[mem_blks].
825 pci_free_consistent(nic->pdev, PAGE_SIZE,
826 mac_control->fifos[i].
829 mac_control->fifos[i].
832 nic->mac_control.stats_info->sw_stat.mem_freed
835 /* If we got a zero DMA address during allocation,
838 if (mac_control->zerodma_virt_addr) {
839 pci_free_consistent(nic->pdev, PAGE_SIZE,
840 mac_control->zerodma_virt_addr,
843 "%s: Freeing TxDL with zero DMA addr. ",
845 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
846 mac_control->zerodma_virt_addr);
847 nic->mac_control.stats_info->sw_stat.mem_freed
850 kfree(mac_control->fifos[i].list_info);
851 nic->mac_control.stats_info->sw_stat.mem_freed +=
852 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
855 size = SIZE_OF_BLOCK;
856 for (i = 0; i < config->rx_ring_num; i++) {
857 blk_cnt = mac_control->rings[i].block_count;
858 for (j = 0; j < blk_cnt; j++) {
859 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
861 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
863 if (tmp_v_addr == NULL)
865 pci_free_consistent(nic->pdev, size,
866 tmp_v_addr, tmp_p_addr);
867 nic->mac_control.stats_info->sw_stat.mem_freed += size;
868 kfree(mac_control->rings[i].rx_blocks[j].rxds);
869 nic->mac_control.stats_info->sw_stat.mem_freed +=
870 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
874 if (nic->rxd_mode == RXD_MODE_3B) {
875 /* Freeing buffer storage addresses in 2BUFF mode. */
876 for (i = 0; i < config->rx_ring_num; i++) {
877 blk_cnt = config->rx_cfg[i].num_rxd /
878 (rxd_count[nic->rxd_mode] + 1);
879 for (j = 0; j < blk_cnt; j++) {
881 if (!mac_control->rings[i].ba[j])
883 while (k != rxd_count[nic->rxd_mode]) {
885 &mac_control->rings[i].ba[j][k];
887 nic->mac_control.stats_info->sw_stat.\
888 mem_freed += (BUF0_LEN + ALIGN_SIZE);
890 nic->mac_control.stats_info->sw_stat.\
891 mem_freed += (BUF1_LEN + ALIGN_SIZE);
894 kfree(mac_control->rings[i].ba[j]);
895 nic->mac_control.stats_info->sw_stat.mem_freed +=
896 (sizeof(struct buffAdd) *
897 (rxd_count[nic->rxd_mode] + 1));
899 kfree(mac_control->rings[i].ba);
900 nic->mac_control.stats_info->sw_stat.mem_freed +=
901 (sizeof(struct buffAdd *) * blk_cnt);
905 if (mac_control->stats_mem) {
906 pci_free_consistent(nic->pdev,
907 mac_control->stats_mem_sz,
908 mac_control->stats_mem,
909 mac_control->stats_mem_phy);
910 nic->mac_control.stats_info->sw_stat.mem_freed +=
911 mac_control->stats_mem_sz;
913 if (nic->ufo_in_band_v) {
914 kfree(nic->ufo_in_band_v);
915 nic->mac_control.stats_info->sw_stat.mem_freed
916 += (ufo_size * sizeof(u64));
921 * s2io_verify_pci_mode -
924 static int s2io_verify_pci_mode(struct s2io_nic *nic)
926 struct XENA_dev_config __iomem *bar0 = nic->bar0;
927 register u64 val64 = 0;
930 val64 = readq(&bar0->pci_mode);
931 mode = (u8)GET_PCI_MODE(val64);
933 if ( val64 & PCI_MODE_UNKNOWN_MODE)
934 return -1; /* Unknown PCI mode */
938 #define NEC_VENID 0x1033
939 #define NEC_DEVID 0x0125
940 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
942 struct pci_dev *tdev = NULL;
943 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
944 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
945 if (tdev->bus == s2io_pdev->bus->parent)
953 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
955 * s2io_print_pci_mode -
957 static int s2io_print_pci_mode(struct s2io_nic *nic)
959 struct XENA_dev_config __iomem *bar0 = nic->bar0;
960 register u64 val64 = 0;
962 struct config_param *config = &nic->config;
964 val64 = readq(&bar0->pci_mode);
965 mode = (u8)GET_PCI_MODE(val64);
967 if ( val64 & PCI_MODE_UNKNOWN_MODE)
968 return -1; /* Unknown PCI mode */
970 config->bus_speed = bus_speed[mode];
972 if (s2io_on_nec_bridge(nic->pdev)) {
973 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
978 if (val64 & PCI_MODE_32_BITS) {
979 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
981 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
985 case PCI_MODE_PCI_33:
986 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
988 case PCI_MODE_PCI_66:
989 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
991 case PCI_MODE_PCIX_M1_66:
992 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
994 case PCI_MODE_PCIX_M1_100:
995 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
997 case PCI_MODE_PCIX_M1_133:
998 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1000 case PCI_MODE_PCIX_M2_66:
1001 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1003 case PCI_MODE_PCIX_M2_100:
1004 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1006 case PCI_MODE_PCIX_M2_133:
1007 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1010 return -1; /* Unsupported bus speed */
1017 * init_nic - Initialization of hardware
1018 * @nic: device private variable
1019 * Description: The function sequentially configures every block
1020 * of the H/W from their reset values.
1021 * Return Value: SUCCESS on success and
1022 * '-1' on failure (endian settings incorrect).
1025 static int init_nic(struct s2io_nic *nic)
1027 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1028 struct net_device *dev = nic->dev;
1029 register u64 val64 = 0;
1033 struct mac_info *mac_control;
1034 struct config_param *config;
1036 unsigned long long mem_share;
1039 mac_control = &nic->mac_control;
1040 config = &nic->config;
1042 /* to set the swapper control on the card */
1043 if(s2io_set_swapper(nic)) {
1044 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1049 * Herc requires EOI to be removed from reset before XGXS, so..
1051 if (nic->device_type & XFRAME_II_DEVICE) {
1052 val64 = 0xA500000000ULL;
1053 writeq(val64, &bar0->sw_reset);
1055 val64 = readq(&bar0->sw_reset);
1058 /* Remove XGXS from reset state */
1060 writeq(val64, &bar0->sw_reset);
1062 val64 = readq(&bar0->sw_reset);
1064 /* Enable Receiving broadcasts */
1065 add = &bar0->mac_cfg;
1066 val64 = readq(&bar0->mac_cfg);
1067 val64 |= MAC_RMAC_BCAST_ENABLE;
1068 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1069 writel((u32) val64, add);
1070 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1071 writel((u32) (val64 >> 32), (add + 4));
1073 /* Read registers in all blocks */
1074 val64 = readq(&bar0->mac_int_mask);
1075 val64 = readq(&bar0->mc_int_mask);
1076 val64 = readq(&bar0->xgxs_int_mask);
1080 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1082 if (nic->device_type & XFRAME_II_DEVICE) {
1083 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1084 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1085 &bar0->dtx_control, UF);
1087 msleep(1); /* Necessary!! */
1091 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1092 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1093 &bar0->dtx_control, UF);
1094 val64 = readq(&bar0->dtx_control);
1099 /* Tx DMA Initialization */
1101 writeq(val64, &bar0->tx_fifo_partition_0);
1102 writeq(val64, &bar0->tx_fifo_partition_1);
1103 writeq(val64, &bar0->tx_fifo_partition_2);
1104 writeq(val64, &bar0->tx_fifo_partition_3);
1107 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1109 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1110 13) | vBIT(config->tx_cfg[i].fifo_priority,
1113 if (i == (config->tx_fifo_num - 1)) {
1120 writeq(val64, &bar0->tx_fifo_partition_0);
1124 writeq(val64, &bar0->tx_fifo_partition_1);
1128 writeq(val64, &bar0->tx_fifo_partition_2);
1132 writeq(val64, &bar0->tx_fifo_partition_3);
1138 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1139 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1141 if ((nic->device_type == XFRAME_I_DEVICE) &&
1142 (nic->pdev->revision < 4))
1143 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1145 val64 = readq(&bar0->tx_fifo_partition_0);
1146 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1147 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1150 * Initialization of Tx_PA_CONFIG register to ignore packet
1151 * integrity checking.
1153 val64 = readq(&bar0->tx_pa_cfg);
1154 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1155 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1156 writeq(val64, &bar0->tx_pa_cfg);
1158 /* Rx DMA initialization. */
1160 for (i = 0; i < config->rx_ring_num; i++) {
1162 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1165 writeq(val64, &bar0->rx_queue_priority);
1168 * Allocating equal share of memory to all the
1172 if (nic->device_type & XFRAME_II_DEVICE)
1177 for (i = 0; i < config->rx_ring_num; i++) {
1180 mem_share = (mem_size / config->rx_ring_num +
1181 mem_size % config->rx_ring_num);
1182 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1185 mem_share = (mem_size / config->rx_ring_num);
1186 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1189 mem_share = (mem_size / config->rx_ring_num);
1190 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1193 mem_share = (mem_size / config->rx_ring_num);
1194 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1197 mem_share = (mem_size / config->rx_ring_num);
1198 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1201 mem_share = (mem_size / config->rx_ring_num);
1202 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1205 mem_share = (mem_size / config->rx_ring_num);
1206 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1209 mem_share = (mem_size / config->rx_ring_num);
1210 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1214 writeq(val64, &bar0->rx_queue_cfg);
1217 * Filling Tx round robin registers
1218 * as per the number of FIFOs
1220 switch (config->tx_fifo_num) {
1222 val64 = 0x0000000000000000ULL;
1223 writeq(val64, &bar0->tx_w_round_robin_0);
1224 writeq(val64, &bar0->tx_w_round_robin_1);
1225 writeq(val64, &bar0->tx_w_round_robin_2);
1226 writeq(val64, &bar0->tx_w_round_robin_3);
1227 writeq(val64, &bar0->tx_w_round_robin_4);
1230 val64 = 0x0000010000010000ULL;
1231 writeq(val64, &bar0->tx_w_round_robin_0);
1232 val64 = 0x0100000100000100ULL;
1233 writeq(val64, &bar0->tx_w_round_robin_1);
1234 val64 = 0x0001000001000001ULL;
1235 writeq(val64, &bar0->tx_w_round_robin_2);
1236 val64 = 0x0000010000010000ULL;
1237 writeq(val64, &bar0->tx_w_round_robin_3);
1238 val64 = 0x0100000000000000ULL;
1239 writeq(val64, &bar0->tx_w_round_robin_4);
1242 val64 = 0x0001000102000001ULL;
1243 writeq(val64, &bar0->tx_w_round_robin_0);
1244 val64 = 0x0001020000010001ULL;
1245 writeq(val64, &bar0->tx_w_round_robin_1);
1246 val64 = 0x0200000100010200ULL;
1247 writeq(val64, &bar0->tx_w_round_robin_2);
1248 val64 = 0x0001000102000001ULL;
1249 writeq(val64, &bar0->tx_w_round_robin_3);
1250 val64 = 0x0001020000000000ULL;
1251 writeq(val64, &bar0->tx_w_round_robin_4);
1254 val64 = 0x0001020300010200ULL;
1255 writeq(val64, &bar0->tx_w_round_robin_0);
1256 val64 = 0x0100000102030001ULL;
1257 writeq(val64, &bar0->tx_w_round_robin_1);
1258 val64 = 0x0200010000010203ULL;
1259 writeq(val64, &bar0->tx_w_round_robin_2);
1260 val64 = 0x0001020001000001ULL;
1261 writeq(val64, &bar0->tx_w_round_robin_3);
1262 val64 = 0x0203000100000000ULL;
1263 writeq(val64, &bar0->tx_w_round_robin_4);
1266 val64 = 0x0001000203000102ULL;
1267 writeq(val64, &bar0->tx_w_round_robin_0);
1268 val64 = 0x0001020001030004ULL;
1269 writeq(val64, &bar0->tx_w_round_robin_1);
1270 val64 = 0x0001000203000102ULL;
1271 writeq(val64, &bar0->tx_w_round_robin_2);
1272 val64 = 0x0001020001030004ULL;
1273 writeq(val64, &bar0->tx_w_round_robin_3);
1274 val64 = 0x0001000000000000ULL;
1275 writeq(val64, &bar0->tx_w_round_robin_4);
1278 val64 = 0x0001020304000102ULL;
1279 writeq(val64, &bar0->tx_w_round_robin_0);
1280 val64 = 0x0304050001020001ULL;
1281 writeq(val64, &bar0->tx_w_round_robin_1);
1282 val64 = 0x0203000100000102ULL;
1283 writeq(val64, &bar0->tx_w_round_robin_2);
1284 val64 = 0x0304000102030405ULL;
1285 writeq(val64, &bar0->tx_w_round_robin_3);
1286 val64 = 0x0001000200000000ULL;
1287 writeq(val64, &bar0->tx_w_round_robin_4);
1290 val64 = 0x0001020001020300ULL;
1291 writeq(val64, &bar0->tx_w_round_robin_0);
1292 val64 = 0x0102030400010203ULL;
1293 writeq(val64, &bar0->tx_w_round_robin_1);
1294 val64 = 0x0405060001020001ULL;
1295 writeq(val64, &bar0->tx_w_round_robin_2);
1296 val64 = 0x0304050000010200ULL;
1297 writeq(val64, &bar0->tx_w_round_robin_3);
1298 val64 = 0x0102030000000000ULL;
1299 writeq(val64, &bar0->tx_w_round_robin_4);
1302 val64 = 0x0001020300040105ULL;
1303 writeq(val64, &bar0->tx_w_round_robin_0);
1304 val64 = 0x0200030106000204ULL;
1305 writeq(val64, &bar0->tx_w_round_robin_1);
1306 val64 = 0x0103000502010007ULL;
1307 writeq(val64, &bar0->tx_w_round_robin_2);
1308 val64 = 0x0304010002060500ULL;
1309 writeq(val64, &bar0->tx_w_round_robin_3);
1310 val64 = 0x0103020400000000ULL;
1311 writeq(val64, &bar0->tx_w_round_robin_4);
1315 /* Enable all configured Tx FIFO partitions */
1316 val64 = readq(&bar0->tx_fifo_partition_0);
1317 val64 |= (TX_FIFO_PARTITION_EN);
1318 writeq(val64, &bar0->tx_fifo_partition_0);
1320 /* Filling the Rx round robin registers as per the
1321 * number of Rings and steering based on QoS.
1323 switch (config->rx_ring_num) {
1325 val64 = 0x8080808080808080ULL;
1326 writeq(val64, &bar0->rts_qos_steering);
1329 val64 = 0x0000010000010000ULL;
1330 writeq(val64, &bar0->rx_w_round_robin_0);
1331 val64 = 0x0100000100000100ULL;
1332 writeq(val64, &bar0->rx_w_round_robin_1);
1333 val64 = 0x0001000001000001ULL;
1334 writeq(val64, &bar0->rx_w_round_robin_2);
1335 val64 = 0x0000010000010000ULL;
1336 writeq(val64, &bar0->rx_w_round_robin_3);
1337 val64 = 0x0100000000000000ULL;
1338 writeq(val64, &bar0->rx_w_round_robin_4);
1340 val64 = 0x8080808040404040ULL;
1341 writeq(val64, &bar0->rts_qos_steering);
1344 val64 = 0x0001000102000001ULL;
1345 writeq(val64, &bar0->rx_w_round_robin_0);
1346 val64 = 0x0001020000010001ULL;
1347 writeq(val64, &bar0->rx_w_round_robin_1);
1348 val64 = 0x0200000100010200ULL;
1349 writeq(val64, &bar0->rx_w_round_robin_2);
1350 val64 = 0x0001000102000001ULL;
1351 writeq(val64, &bar0->rx_w_round_robin_3);
1352 val64 = 0x0001020000000000ULL;
1353 writeq(val64, &bar0->rx_w_round_robin_4);
1355 val64 = 0x8080804040402020ULL;
1356 writeq(val64, &bar0->rts_qos_steering);
1359 val64 = 0x0001020300010200ULL;
1360 writeq(val64, &bar0->rx_w_round_robin_0);
1361 val64 = 0x0100000102030001ULL;
1362 writeq(val64, &bar0->rx_w_round_robin_1);
1363 val64 = 0x0200010000010203ULL;
1364 writeq(val64, &bar0->rx_w_round_robin_2);
1365 val64 = 0x0001020001000001ULL;
1366 writeq(val64, &bar0->rx_w_round_robin_3);
1367 val64 = 0x0203000100000000ULL;
1368 writeq(val64, &bar0->rx_w_round_robin_4);
1370 val64 = 0x8080404020201010ULL;
1371 writeq(val64, &bar0->rts_qos_steering);
1374 val64 = 0x0001000203000102ULL;
1375 writeq(val64, &bar0->rx_w_round_robin_0);
1376 val64 = 0x0001020001030004ULL;
1377 writeq(val64, &bar0->rx_w_round_robin_1);
1378 val64 = 0x0001000203000102ULL;
1379 writeq(val64, &bar0->rx_w_round_robin_2);
1380 val64 = 0x0001020001030004ULL;
1381 writeq(val64, &bar0->rx_w_round_robin_3);
1382 val64 = 0x0001000000000000ULL;
1383 writeq(val64, &bar0->rx_w_round_robin_4);
1385 val64 = 0x8080404020201008ULL;
1386 writeq(val64, &bar0->rts_qos_steering);
1389 val64 = 0x0001020304000102ULL;
1390 writeq(val64, &bar0->rx_w_round_robin_0);
1391 val64 = 0x0304050001020001ULL;
1392 writeq(val64, &bar0->rx_w_round_robin_1);
1393 val64 = 0x0203000100000102ULL;
1394 writeq(val64, &bar0->rx_w_round_robin_2);
1395 val64 = 0x0304000102030405ULL;
1396 writeq(val64, &bar0->rx_w_round_robin_3);
1397 val64 = 0x0001000200000000ULL;
1398 writeq(val64, &bar0->rx_w_round_robin_4);
1400 val64 = 0x8080404020100804ULL;
1401 writeq(val64, &bar0->rts_qos_steering);
1404 val64 = 0x0001020001020300ULL;
1405 writeq(val64, &bar0->rx_w_round_robin_0);
1406 val64 = 0x0102030400010203ULL;
1407 writeq(val64, &bar0->rx_w_round_robin_1);
1408 val64 = 0x0405060001020001ULL;
1409 writeq(val64, &bar0->rx_w_round_robin_2);
1410 val64 = 0x0304050000010200ULL;
1411 writeq(val64, &bar0->rx_w_round_robin_3);
1412 val64 = 0x0102030000000000ULL;
1413 writeq(val64, &bar0->rx_w_round_robin_4);
1415 val64 = 0x8080402010080402ULL;
1416 writeq(val64, &bar0->rts_qos_steering);
1419 val64 = 0x0001020300040105ULL;
1420 writeq(val64, &bar0->rx_w_round_robin_0);
1421 val64 = 0x0200030106000204ULL;
1422 writeq(val64, &bar0->rx_w_round_robin_1);
1423 val64 = 0x0103000502010007ULL;
1424 writeq(val64, &bar0->rx_w_round_robin_2);
1425 val64 = 0x0304010002060500ULL;
1426 writeq(val64, &bar0->rx_w_round_robin_3);
1427 val64 = 0x0103020400000000ULL;
1428 writeq(val64, &bar0->rx_w_round_robin_4);
1430 val64 = 0x8040201008040201ULL;
1431 writeq(val64, &bar0->rts_qos_steering);
1437 for (i = 0; i < 8; i++)
1438 writeq(val64, &bar0->rts_frm_len_n[i]);
1440 /* Set the default rts frame length for the rings configured */
1441 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1442 for (i = 0 ; i < config->rx_ring_num ; i++)
1443 writeq(val64, &bar0->rts_frm_len_n[i]);
1445 /* Set the frame length for the configured rings
1446 * desired by the user
1448 for (i = 0; i < config->rx_ring_num; i++) {
1449 /* If rts_frm_len[i] == 0 then it is assumed that user not
1450 * specified frame length steering.
1451 * If the user provides the frame length then program
1452 * the rts_frm_len register for those values or else
1453 * leave it as it is.
1455 if (rts_frm_len[i] != 0) {
1456 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1457 &bar0->rts_frm_len_n[i]);
1461 /* Disable differentiated services steering logic */
1462 for (i = 0; i < 64; i++) {
1463 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1464 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1466 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1471 /* Program statistics memory */
1472 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1474 if (nic->device_type == XFRAME_II_DEVICE) {
1475 val64 = STAT_BC(0x320);
1476 writeq(val64, &bar0->stat_byte_cnt);
1480 * Initializing the sampling rate for the device to calculate the
1481 * bandwidth utilization.
1483 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1484 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1485 writeq(val64, &bar0->mac_link_util);
1489 * Initializing the Transmit and Receive Traffic Interrupt
1493 * TTI Initialization. Default Tx timer gets us about
1494 * 250 interrupts per sec. Continuous interrupts are enabled
1497 if (nic->device_type == XFRAME_II_DEVICE) {
1498 int count = (nic->config.bus_speed * 125)/2;
1499 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1502 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1504 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1505 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1506 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1507 if (use_continuous_tx_intrs)
1508 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1509 writeq(val64, &bar0->tti_data1_mem);
1511 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1512 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1513 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1514 writeq(val64, &bar0->tti_data2_mem);
1516 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1517 writeq(val64, &bar0->tti_command_mem);
1520 * Once the operation completes, the Strobe bit of the command
1521 * register will be reset. We poll for this particular condition
1522 * We wait for a maximum of 500ms for the operation to complete,
1523 * if it's not complete by then we return error.
1527 val64 = readq(&bar0->tti_command_mem);
1528 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1532 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1540 if (nic->config.bimodal) {
1542 for (k = 0; k < config->rx_ring_num; k++) {
1543 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1544 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1545 writeq(val64, &bar0->tti_command_mem);
1548 * Once the operation completes, the Strobe bit of the command
1549 * register will be reset. We poll for this particular condition
1550 * We wait for a maximum of 500ms for the operation to complete,
1551 * if it's not complete by then we return error.
1555 val64 = readq(&bar0->tti_command_mem);
1556 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1561 "%s: TTI init Failed\n",
1571 /* RTI Initialization */
1572 if (nic->device_type == XFRAME_II_DEVICE) {
1574 * Programmed to generate Apprx 500 Intrs per
1577 int count = (nic->config.bus_speed * 125)/4;
1578 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1580 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1582 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1583 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1584 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1586 writeq(val64, &bar0->rti_data1_mem);
1588 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1589 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1590 if (nic->intr_type == MSI_X)
1591 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1592 RTI_DATA2_MEM_RX_UFC_D(0x40));
1594 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1595 RTI_DATA2_MEM_RX_UFC_D(0x80));
1596 writeq(val64, &bar0->rti_data2_mem);
1598 for (i = 0; i < config->rx_ring_num; i++) {
1599 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1600 | RTI_CMD_MEM_OFFSET(i);
1601 writeq(val64, &bar0->rti_command_mem);
1604 * Once the operation completes, the Strobe bit of the
1605 * command register will be reset. We poll for this
1606 * particular condition. We wait for a maximum of 500ms
1607 * for the operation to complete, if it's not complete
1608 * by then we return error.
1612 val64 = readq(&bar0->rti_command_mem);
1613 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1617 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1628 * Initializing proper values as Pause threshold into all
1629 * the 8 Queues on Rx side.
1631 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1632 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1634 /* Disable RMAC PAD STRIPPING */
1635 add = &bar0->mac_cfg;
1636 val64 = readq(&bar0->mac_cfg);
1637 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1638 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1639 writel((u32) (val64), add);
1640 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1641 writel((u32) (val64 >> 32), (add + 4));
1642 val64 = readq(&bar0->mac_cfg);
1644 /* Enable FCS stripping by adapter */
1645 add = &bar0->mac_cfg;
1646 val64 = readq(&bar0->mac_cfg);
1647 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1648 if (nic->device_type == XFRAME_II_DEVICE)
1649 writeq(val64, &bar0->mac_cfg);
1651 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1652 writel((u32) (val64), add);
1653 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1654 writel((u32) (val64 >> 32), (add + 4));
1658 * Set the time value to be inserted in the pause frame
1659 * generated by xena.
1661 val64 = readq(&bar0->rmac_pause_cfg);
1662 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1663 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1664 writeq(val64, &bar0->rmac_pause_cfg);
1667 * Set the Threshold Limit for Generating the pause frame
1668 * If the amount of data in any Queue exceeds ratio of
1669 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1670 * pause frame is generated
1673 for (i = 0; i < 4; i++) {
1675 (((u64) 0xFF00 | nic->mac_control.
1676 mc_pause_threshold_q0q3)
1679 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1682 for (i = 0; i < 4; i++) {
1684 (((u64) 0xFF00 | nic->mac_control.
1685 mc_pause_threshold_q4q7)
1688 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1691 * TxDMA will stop Read request if the number of read split has
1692 * exceeded the limit pointed by shared_splits
1694 val64 = readq(&bar0->pic_control);
1695 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1696 writeq(val64, &bar0->pic_control);
1698 if (nic->config.bus_speed == 266) {
1699 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1700 writeq(0x0, &bar0->read_retry_delay);
1701 writeq(0x0, &bar0->write_retry_delay);
1705 * Programming the Herc to split every write transaction
1706 * that does not start on an ADB to reduce disconnects.
1708 if (nic->device_type == XFRAME_II_DEVICE) {
1709 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1710 MISC_LINK_STABILITY_PRD(3);
1711 writeq(val64, &bar0->misc_control);
1712 val64 = readq(&bar0->pic_control2);
1713 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1714 writeq(val64, &bar0->pic_control2);
1716 if (strstr(nic->product_name, "CX4")) {
1717 val64 = TMAC_AVG_IPG(0x17);
1718 writeq(val64, &bar0->tmac_avg_ipg);
1723 #define LINK_UP_DOWN_INTERRUPT 1
1724 #define MAC_RMAC_ERR_TIMER 2
1726 static int s2io_link_fault_indication(struct s2io_nic *nic)
1728 if (nic->intr_type != INTA)
1729 return MAC_RMAC_ERR_TIMER;
1730 if (nic->device_type == XFRAME_II_DEVICE)
1731 return LINK_UP_DOWN_INTERRUPT;
1733 return MAC_RMAC_ERR_TIMER;
1736 * do_s2io_write_bits - update alarm bits in alarm register
1737 * @value: alarm bits
1738 * @flag: interrupt status
1739 * @addr: address value
1740 * Description: update alarm bits in alarm register
1744 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1748 temp64 = readq(addr);
1750 if(flag == ENABLE_INTRS)
1751 temp64 &= ~((u64) value);
1753 temp64 |= ((u64) value);
1754 writeq(temp64, addr);
/*
 * en_dis_err_alarms - enable/disable the per-block error alarm masks
 * @nic: device private structure
 * @mask: which interrupt blocks (TX_DMA_INTR, TX_MAC_INTR, ...) to touch
 * @flag: ENABLE_INTRS or DISABLE_INTRS, forwarded to do_s2io_write_bits()
 *
 * For every block selected in @mask, program that block's alarm mask
 * registers and accumulate the block's top-level bit into gen_int_mask,
 * which is stored in nic->general_int_mask for the caller
 * (en_dis_able_nic_intrs) to merge into the general mask register.
 */
1757 void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1759 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1760 register u64 gen_int_mask = 0;
/* Tx DMA sub-blocks: PFC, TDA, PCC, TTI, LSO, TPA and state machine */
1762 if (mask & TX_DMA_INTR) {
1764 gen_int_mask |= TXDMA_INT_M;
1766 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1767 TXDMA_PCC_INT | TXDMA_TTI_INT |
1768 TXDMA_LSO_INT | TXDMA_TPA_INT |
1769 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1771 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1772 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1773 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1774 &bar0->pfc_err_mask);
1776 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1777 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1778 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1780 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1781 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1782 PCC_N_SERR | PCC_6_COF_OV_ERR |
1783 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1784 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1785 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1787 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1788 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1790 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1791 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1792 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1793 flag, &bar0->lso_err_mask);
1795 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1796 flag, &bar0->tpa_err_mask);
1798 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
/* Tx MAC (TMAC) buffer overrun, state machine and ECC alarms */
1802 if (mask & TX_MAC_INTR) {
1803 gen_int_mask |= TXMAC_INT_M;
1804 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1805 &bar0->mac_int_mask);
1806 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1807 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1808 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1809 flag, &bar0->mac_tmac_err_mask);
/* Tx XGXS (10G serdes interface) alarms */
1812 if (mask & TX_XGXS_INTR) {
1813 gen_int_mask |= TXXGXS_INT_M;
1814 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1815 &bar0->xgxs_int_mask);
1816 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1817 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1818 flag, &bar0->xgxs_txgxs_err_mask);
/* Rx DMA sub-blocks: RC, PRC (PCI-X), RPA, RDA and RTI */
1821 if (mask & RX_DMA_INTR) {
1822 gen_int_mask |= RXDMA_INT_M;
1823 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1824 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1825 flag, &bar0->rxdma_int_mask);
1826 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1827 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1828 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1829 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1830 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1831 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1832 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1833 &bar0->prc_pcix_err_mask);
1834 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1835 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1836 &bar0->rpa_err_mask);
1837 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1838 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1839 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1840 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
1841 flag, &bar0->rda_err_mask);
1842 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1843 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1844 flag, &bar0->rti_err_mask);
/* Rx MAC (RMAC) overrun, ECC and link-state-change alarms */
1847 if (mask & RX_MAC_INTR) {
1848 gen_int_mask |= RXMAC_INT_M;
1849 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1850 &bar0->mac_int_mask);
1851 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1852 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1853 RMAC_DOUBLE_ECC_ERR |
1854 RMAC_LINK_STATE_CHANGE_INT,
1855 flag, &bar0->mac_rmac_err_mask);
/* Rx XGXS alarms */
1858 if (mask & RX_XGXS_INTR)
1860 gen_int_mask |= RXXGXS_INT_M;
1861 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1862 &bar0->xgxs_int_mask);
1863 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1864 &bar0->xgxs_rxgxs_err_mask);
/* Memory controller (RLDRAM) ECC and PLL alarms */
1867 if (mask & MC_INTR) {
1868 gen_int_mask |= MC_INT_M;
1869 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
1870 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1871 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1872 &bar0->mc_err_mask);
1874 nic->general_int_mask = gen_int_mask;
/* Deliberately zeroed: alarm interrupts are not yet wired up, so the
 * accumulated mask above is discarded for now. */
1876 /* Remove this line when alarm interrupts are enabled */
1877 nic->general_int_mask = 0;
1880 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1881 * @nic: device private variable,
1882 * @mask: A mask indicating which Intr block must be modified and,
1883 * @flag: A flag indicating whether to enable or disable the Intrs.
1884 * Description: This function will either disable or enable the interrupts
1885 * depending on the flag argument. The mask argument can be used to
1886 * enable/disable any Intr block.
1887 * Return Value: NONE.
1890 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1892 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1893 register u64 temp64 = 0, intr_mask = 0;
/* Start from the alarm bits recorded by en_dis_err_alarms() */
1895 intr_mask = nic->general_int_mask;
1897 /* Top level interrupt classification */
1898 /* PIC Interrupts */
1899 if (mask & TX_PIC_INTR) {
1900 /* Enable PIC Intrs in the general intr mask register */
1901 intr_mask |= TXPIC_INT_M;
1902 if (flag == ENABLE_INTRS) {
1904 * If Hercules adapter enable GPIO otherwise
1905 * disable all PCIX, Flash, MDIO, IIC and GPIO
1906 * interrupts for now.
/* GPIO/link interrupts are only unmasked when the adapter uses the
 * link up/down interrupt scheme (Xframe II on INTA). */
1909 if (s2io_link_fault_indication(nic) ==
1910 LINK_UP_DOWN_INTERRUPT ) {
1911 do_s2io_write_bits(PIC_INT_GPIO, flag,
1912 &bar0->pic_int_mask);
1913 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1914 &bar0->gpio_int_mask);
1916 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1917 } else if (flag == DISABLE_INTRS) {
1919 * Disable PIC Intrs in the general
1920 * intr mask register
1922 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1926 /* Tx traffic interrupts */
1927 if (mask & TX_TRAFFIC_INTR) {
1928 intr_mask |= TXTRAFFIC_INT_M;
1929 if (flag == ENABLE_INTRS) {
1931 * Enable all the Tx side interrupts
1932 * writing 0 Enables all 64 TX interrupt levels
1934 writeq(0x0, &bar0->tx_traffic_mask);
1935 } else if (flag == DISABLE_INTRS) {
1937 * Disable Tx Traffic Intrs in the general intr mask
1940 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1944 /* Rx traffic interrupts */
1945 if (mask & RX_TRAFFIC_INTR) {
1946 intr_mask |= RXTRAFFIC_INT_M;
1947 if (flag == ENABLE_INTRS) {
1948 /* writing 0 Enables all 8 RX interrupt levels */
1949 writeq(0x0, &bar0->rx_traffic_mask);
1950 } else if (flag == DISABLE_INTRS) {
1952 * Disable Rx Traffic Intrs in the general intr mask
1955 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
/* Enable: clear the selected bits in the general mask (0 = enabled).
 * Disable: mask everything, not just the selected blocks. */
1959 temp64 = readq(&bar0->general_int_mask);
1960 if (flag == ENABLE_INTRS)
1961 temp64 &= ~((u64) intr_mask);
1963 temp64 = DISABLE_ALL_INTRS;
1964 writeq(temp64, &bar0->general_int_mask);
/* Cache the value the hardware actually latched */
1966 nic->general_int_mask = readq(&bar0->general_int_mask);
1970 * verify_pcc_quiescent- Checks for PCC quiescent state
1971 * Return: 1 If PCC is quiescence
1972 * 0 If PCC is not quiescence
/* @flag FALSE checks that the PCC idle bits are set; otherwise the bits
 * are compared for exact equality with the idle pattern. The revision
 * test below selects which idle bit-field applies: Herc (Xframe II) and
 * Xena revisions >= 4 use RMAC_PCC_IDLE, older Xena uses
 * RMAC_PCC_FOUR_IDLE. NOTE(review): "(!herc && rev >= 4) || herc" is
 * logically just "herc || rev >= 4". */
1974 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1977 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1978 u64 val64 = readq(&bar0->adapter_status);
1980 herc = (sp->device_type == XFRAME_II_DEVICE);
1982 if (flag == FALSE) {
1983 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1984 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1987 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1991 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1992 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1993 ADAPTER_STATUS_RMAC_PCC_IDLE))
1996 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1997 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2005 * verify_xena_quiescence - Checks whether the H/W is ready
2006 * Description: Returns whether the H/W is ready to go or not. Depending
2007 * on whether adapter enable bit was written or not the comparison
2008 * differs and the calling function passes the input argument flag to
2010 * Return: 1 If xena is quiescence
2011 * 0 If Xena is not quiescence
/* Reads adapter_status once and checks each readiness bit in turn,
 * logging the first subsystem that is not ready. */
2014 static int verify_xena_quiescence(struct s2io_nic *sp)
2017 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2018 u64 val64 = readq(&bar0->adapter_status);
2019 mode = s2io_verify_pci_mode(sp);
2021 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2022 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2025 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2026 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2029 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2030 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2033 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2034 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2037 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2038 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2041 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2042 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2045 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2046 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2049 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2050 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2055 * In PCI 33 mode, the P_PLL is not used, and therefore,
2056 * the P_PLL_LOCK bit in the adapter_status register will
/* P_PLL lock is only required on Xframe II and only when the bus is
 * not running in PCI 33MHz mode (per the note above). */
2059 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2060 sp->device_type == XFRAME_II_DEVICE && mode !=
2062 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2065 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2066 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2067 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2074 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2075 * @sp: Pointer to device specifc structure
2077 * New procedure to clear mac address reading problems on Alpha platforms
/* Replays the fix_mac[] sequence into the gpio_control register until
 * the END_SIGN terminator, reading the register back after each write
 * (the read-back presumably forces the posted write to complete —
 * TODO confirm against the hardware errata this works around). */
2081 static void fix_mac_address(struct s2io_nic * sp)
2083 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2087 while (fix_mac[i] != END_SIGN) {
2088 writeq(fix_mac[i++], &bar0->gpio_control);
2090 val64 = readq(&bar0->gpio_control);
2095 * start_nic - Turns the device on
2096 * @nic : device private variable.
2098 * This function actually turns the device on. Before this function is
2099 * called,all Registers are configured from their reset states
2100 * and shared memory is allocated but the NIC is still quiescent. On
2101 * calling this function, the device interrupts are cleared and the NIC is
2102 * literally switched on by writing into the adapter control register.
2104 * SUCCESS on success and -1 on failure.
2107 static int start_nic(struct s2io_nic *nic)
2109 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2110 struct net_device *dev = nic->dev;
2111 register u64 val64 = 0;
2113 struct mac_info *mac_control;
2114 struct config_param *config;
2116 mac_control = &nic->mac_control;
2117 config = &nic->config;
2119 /* PRC Initialization and configuration */
/* Point each ring's PRC at its first Rx block and enable receive,
 * selecting the descriptor mode and backoff interval. */
2120 for (i = 0; i < config->rx_ring_num; i++) {
2121 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2122 &bar0->prc_rxd0_n[i]);
2124 val64 = readq(&bar0->prc_ctrl_n[i]);
2125 if (nic->config.bimodal)
2126 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
2127 if (nic->rxd_mode == RXD_MODE_1)
2128 val64 |= PRC_CTRL_RC_ENABLED;
2130 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2131 if (nic->device_type == XFRAME_II_DEVICE)
2132 val64 |= PRC_CTRL_GROUP_READS;
2133 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2134 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2135 writeq(val64, &bar0->prc_ctrl_n[i]);
2138 if (nic->rxd_mode == RXD_MODE_3B) {
2139 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2140 val64 = readq(&bar0->rx_pa_cfg);
2141 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2142 writeq(val64, &bar0->rx_pa_cfg);
/* Module parameter vlan_tag_strip == 0 turns off VLAN stripping */
2145 if (vlan_tag_strip == 0) {
2146 val64 = readq(&bar0->rx_pa_cfg);
2147 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2148 writeq(val64, &bar0->rx_pa_cfg);
2149 vlan_strip_flag = 0;
2153 * Enabling MC-RLDRAM. After enabling the device, we timeout
2154 * for around 100ms, which is approximately the time required
2155 * for the device to be ready for operation.
2157 val64 = readq(&bar0->mc_rldram_mrs);
2158 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2159 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2160 val64 = readq(&bar0->mc_rldram_mrs);
2162 msleep(100); /* Delay by around 100 ms. */
/* NOTE(review): the comment says "Enabling ECC Protection" but the code
 * CLEARS ADAPTER_ECC_EN — presumably the bit is an ECC-disable control
 * or active-low; confirm against the ADAPTER_ECC_EN definition. */
2164 /* Enabling ECC Protection. */
2165 val64 = readq(&bar0->adapter_control);
2166 val64 &= ~ADAPTER_ECC_EN;
2167 writeq(val64, &bar0->adapter_control);
2170 * Verify if the device is ready to be enabled, if so enable
2173 val64 = readq(&bar0->adapter_status);
2174 if (!verify_xena_quiescence(nic)) {
2175 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2176 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2177 (unsigned long long) val64);
2182 * With some switches, link might be already up at this point.
2183 * Because of this weird behavior, when we enable laser,
2184 * we may not get link. We need to handle this. We cannot
2185 * figure out which switch is misbehaving. So we are forced to
2186 * make a global change.
2189 /* Enabling Laser. */
2190 val64 = readq(&bar0->adapter_control);
2191 val64 |= ADAPTER_EOI_TX_ON;
2192 writeq(val64, &bar0->adapter_control);
2194 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2196 * Dont see link state interrupts initally on some switches,
2197 * so directly scheduling the link state task here.
2199 schedule_work(&nic->set_link_task);
2201 /* SXE-002: Initialize link and activity LED */
2202 subid = nic->pdev->subsystem_device;
2203 if (((subid & 0xFF) >= 0x07) &&
2204 (nic->device_type == XFRAME_I_DEVICE)) {
2205 val64 = readq(&bar0->gpio_control);
2206 val64 |= 0x0000800000000000ULL;
2207 writeq(val64, &bar0->gpio_control);
2208 val64 = 0x0411040400000000ULL;
/* Magic LED init sequence written at raw BAR0 offset 0x2700
 * (no named register field for it) — per errata SXE-002 above. */
2209 writeq(val64, (void __iomem *)bar0 + 0x2700);
2215 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
/* @fifo_data: FIFO the descriptor list belongs to
 * @txdlp: first TxD of the descriptor list
 * @get_off: descriptor-list offset (presumably used for diagnostics in
 * code not visible here — TODO confirm)
 * Unmaps every DMA buffer attached to the descriptor list (first the
 * linear part, then each page fragment), zeroes the descriptors and
 * returns the skb recovered from Host_Control so the caller can free it. */
2217 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2218 TxD *txdlp, int get_off)
2220 struct s2io_nic *nic = fifo_data->nic;
2221 struct sk_buff *skb;
/* Special case: a UFO in-band descriptor carries a u64 marker, not a
 * frame buffer — just unmap the marker. */
2226 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2227 pci_unmap_single(nic->pdev, (dma_addr_t)
2228 txds->Buffer_Pointer, sizeof(u64),
2233 skb = (struct sk_buff *) ((unsigned long)
2234 txds->Host_Control);
2236 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
/* Unmap the linear (non-paged) part of the skb */
2239 pci_unmap_single(nic->pdev, (dma_addr_t)
2240 txds->Buffer_Pointer,
2241 skb->len - skb->data_len,
2243 frg_cnt = skb_shinfo(skb)->nr_frags;
/* Then unmap each page fragment in its own descriptor */
2246 for (j = 0; j < frg_cnt; j++, txds++) {
2247 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2248 if (!txds->Buffer_Pointer)
2250 pci_unmap_page(nic->pdev, (dma_addr_t)
2251 txds->Buffer_Pointer,
2252 frag->size, PCI_DMA_TODEVICE);
/* Clear the whole descriptor list so it can be reused */
2255 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2260 * free_tx_buffers - Free all queued Tx buffers
2261 * @nic : device private variable.
2263 * Free all queued Tx buffers.
2264 * Return Value: void
2267 static void free_tx_buffers(struct s2io_nic *nic)
2269 struct net_device *dev = nic->dev;
2270 struct sk_buff *skb;
2273 struct mac_info *mac_control;
2274 struct config_param *config;
2277 mac_control = &nic->mac_control;
2278 config = &nic->config;
/* Walk every descriptor of every FIFO, unmapping and reclaiming any
 * skb still queued via s2io_txdl_getskb(), and account the freed
 * memory in the software stats. */
2280 for (i = 0; i < config->tx_fifo_num; i++) {
2281 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2282 txdp = (struct TxD *) \
2283 mac_control->fifos[i].list_info[j].list_virt_addr;
2284 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2286 nic->mac_control.stats_info->sw_stat.mem_freed
2293 "%s:forcibly freeing %d skbs on FIFO%d\n",
/* Reset the FIFO's producer/consumer offsets to empty */
2295 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2296 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2301 * stop_nic - To stop the nic
2302 * @nic ; device private variable.
2304 * This function does exactly the opposite of what the start_nic()
2305 * function does. This function is called to stop the device.
2310 static void stop_nic(struct s2io_nic *nic)
2312 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2313 register u64 val64 = 0;
2315 struct mac_info *mac_control;
2316 struct config_param *config;
2318 mac_control = &nic->mac_control;
2319 config = &nic->config;
2321 /* Disable all interrupts */
2322 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2323 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2324 interruptible |= TX_PIC_INTR;
2325 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2327 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2328 val64 = readq(&bar0->adapter_control);
2329 val64 &= ~(ADAPTER_CNTL_EN);
2330 writeq(val64, &bar0->adapter_control);
2334 * fill_rx_buffers - Allocates the Rx side skbs
2335 * @nic: device private variable
2336 * @ring_no: ring number
2338 * The function allocates Rx side skbs and puts the physical
2339 * address of these buffers into the RxD buffer pointers, so that the NIC
2340 * can DMA the received frame into these locations.
2341 * The NIC supports 3 receive modes, viz
2343 * 2. three buffer and
2344 * 3. Five buffer modes.
2345 * Each mode defines how many fragments the received frame will be split
2346 * up into by the NIC. The frame is split into L3 header, L4 Header,
2347 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2348 * is split into 3 fragments. As of now only single buffer mode is
2351 * SUCCESS on success or an appropriate -ve value on failure.
/* Replenish ring 'ring_no': allocate skbs, DMA-map them and publish the
 * mappings through the RxD buffer pointers.  Descriptor ownership
 * (RXD_OWN_XENA) is handed to the NIC in batches gated by rxsync_frequency,
 * with the first descriptor of each batch released last ('first_rxdp').
 * Returns SUCCESS or a negative errno (callers check for -ENOMEM).
 * NOTE(review): the embedded original line numbers skip values, so braces,
 * several declarations (alloc_tab/alloc_cnt/rxdp/ret/tmp/ba and friends),
 * some DBG_PRINT arguments, the wmb() referenced by the exit comment, and
 * the return statements are missing from this extracted view.  All code
 * lines are left byte-identical.
 */
2354 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2356 struct net_device *dev = nic->dev;
2357 struct sk_buff *skb;
2359 int off, off1, size, block_no, block_no1;
2362 struct mac_info *mac_control;
2363 struct config_param *config;
2366 unsigned long flags;
2367 struct RxD_t *first_rxdp = NULL;
2368 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2371 struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2373 mac_control = &nic->mac_control;
2374 config = &nic->config;
/* How many descriptors still need a buffer: ring capacity minus buffers
 * currently owned by the host. */
2375 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2376 atomic_read(&nic->rx_bufs_left[ring_no]);
2378 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2379 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2380 while (alloc_tab < alloc_cnt) {
2381 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2383 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2385 rxdp = mac_control->rings[ring_no].
2386 rx_blocks[block_no].rxds[off].virt_addr;
/* Put index has caught up with get index on a still-populated
 * descriptor: ring is effectively full, stop refilling. */
2388 if ((block_no == block_no1) && (off == off1) &&
2389 (rxdp->Host_Control)) {
2390 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2392 DBG_PRINT(INTR_DBG, " info equated\n");
/* End of a block reached: advance (and wrap) the put block index. */
2395 if (off && (off == rxd_count[nic->rxd_mode])) {
2396 mac_control->rings[ring_no].rx_curr_put_info.
2398 if (mac_control->rings[ring_no].rx_curr_put_info.
2399 block_index == mac_control->rings[ring_no].
2401 mac_control->rings[ring_no].rx_curr_put_info.
2403 block_no = mac_control->rings[ring_no].
2404 rx_curr_put_info.block_index;
2405 if (off == rxd_count[nic->rxd_mode])
2407 mac_control->rings[ring_no].rx_curr_put_info.
2409 rxdp = mac_control->rings[ring_no].
2410 rx_blocks[block_no].block_virt_addr;
2411 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
/* put_pos is read by rx_intr_handler; protect it with put_lock on
 * configurations that take the lock (see the unlocked twin below). */
2415 spin_lock_irqsave(&nic->put_lock, flags);
2416 mac_control->rings[ring_no].put_pos =
2417 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2418 spin_unlock_irqrestore(&nic->put_lock, flags);
2420 mac_control->rings[ring_no].put_pos =
2421 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
/* Descriptor still owned by the NIC (3-buffer mode marker set):
 * nothing more to do on this ring for now. */
2423 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2424 ((nic->rxd_mode == RXD_MODE_3B) &&
2425 (rxdp->Control_2 & BIT(0)))) {
2426 mac_control->rings[ring_no].rx_curr_put_info.
2430 /* calculate size of skb based on ring mode */
2431 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2432 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2433 if (nic->rxd_mode == RXD_MODE_1)
2434 size += NET_IP_ALIGN;
2436 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2439 skb = dev_alloc_skb(size);
2441 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2442 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
/* On allocation failure, still release the pending batch head so the
 * NIC can use what was already refilled. */
2445 first_rxdp->Control_1 |= RXD_OWN_XENA;
2447 nic->mac_control.stats_info->sw_stat. \
2448 mem_alloc_fail_cnt++;
2451 nic->mac_control.stats_info->sw_stat.mem_allocated
2453 if (nic->rxd_mode == RXD_MODE_1) {
2454 /* 1 buffer mode - normal operation mode */
2455 rxdp1 = (struct RxD1*)rxdp;
2456 memset(rxdp, 0, sizeof(struct RxD1));
2457 skb_reserve(skb, NET_IP_ALIGN);
2458 rxdp1->Buffer0_ptr = pci_map_single
2459 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2460 PCI_DMA_FROMDEVICE);
2461 if( (rxdp1->Buffer0_ptr == 0) ||
2462 (rxdp1->Buffer0_ptr ==
2464 goto pci_map_failed;
2467 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2469 } else if (nic->rxd_mode == RXD_MODE_3B) {
2472 * 2 buffer mode provides 128
2473 * byte aligned receive buffers.
2476 rxdp3 = (struct RxD3*)rxdp;
2477 /* save buffer pointers to avoid frequent dma mapping */
2478 Buffer0_ptr = rxdp3->Buffer0_ptr;
2479 Buffer1_ptr = rxdp3->Buffer1_ptr;
2480 memset(rxdp, 0, sizeof(struct RxD3));
2481 /* restore the buffer pointers for dma sync*/
2482 rxdp3->Buffer0_ptr = Buffer0_ptr;
2483 rxdp3->Buffer1_ptr = Buffer1_ptr;
2485 ba = &mac_control->rings[ring_no].ba[block_no][off];
2486 skb_reserve(skb, BUF0_LEN);
/* Align skb->data (alignment computation lines missing from this
 * extraction) before mapping the payload buffer. */
2487 tmp = (u64)(unsigned long) skb->data;
2490 skb->data = (void *) (unsigned long)tmp;
2491 skb_reset_tail_pointer(skb);
/* Buffer0 (header buffer) is mapped once and only synced on reuse. */
2493 if (!(rxdp3->Buffer0_ptr))
2494 rxdp3->Buffer0_ptr =
2495 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2496 PCI_DMA_FROMDEVICE);
2498 pci_dma_sync_single_for_device(nic->pdev,
2499 (dma_addr_t) rxdp3->Buffer0_ptr,
2500 BUF0_LEN, PCI_DMA_FROMDEVICE);
2501 if( (rxdp3->Buffer0_ptr == 0) ||
2502 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2503 goto pci_map_failed;
2505 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2506 if (nic->rxd_mode == RXD_MODE_3B) {
2507 /* Two buffer mode */
2510 * Buffer2 will have L3/L4 header plus
2513 rxdp3->Buffer2_ptr = pci_map_single
2514 (nic->pdev, skb->data, dev->mtu + 4,
2515 PCI_DMA_FROMDEVICE);
2517 if( (rxdp3->Buffer2_ptr == 0) ||
2518 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2519 goto pci_map_failed;
2521 rxdp3->Buffer1_ptr =
2522 pci_map_single(nic->pdev,
2524 PCI_DMA_FROMDEVICE);
/* Buffer1 mapping failed: unmap the Buffer2 mapping made just
 * above before bailing, to avoid leaking the DMA mapping. */
2525 if( (rxdp3->Buffer1_ptr == 0) ||
2526 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2529 (dma_addr_t)rxdp3->Buffer2_ptr,
2531 PCI_DMA_FROMDEVICE);
2532 goto pci_map_failed;
2534 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2535 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2538 rxdp->Control_2 |= BIT(0);
2540 rxdp->Host_Control = (unsigned long) (skb);
/* Mid-batch descriptors get ownership immediately; the batch head
 * ('first_rxdp') is released only at a sync boundary / on exit. */
2541 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2542 rxdp->Control_1 |= RXD_OWN_XENA;
2544 if (off == (rxd_count[nic->rxd_mode] + 1))
2546 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2548 rxdp->Control_2 |= SET_RXD_MARKER;
2549 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2552 first_rxdp->Control_1 |= RXD_OWN_XENA;
2556 atomic_inc(&nic->rx_bufs_left[ring_no]);
2561 /* Transfer ownership of first descriptor to adapter just before
2562 * exiting. Before that, use memory barrier so that ownership
2563 * and other fields are seen by adapter correctly.
2567 first_rxdp->Control_1 |= RXD_OWN_XENA;
/* pci_map_failed path: account the failure and free the orphaned skb. */
2572 stats->pci_map_fail_cnt++;
2573 stats->mem_freed += skb->truesize;
2574 dev_kfree_skb_irq(skb);
/* Free every host-owned skb in one Rx block of 'ring_no': unmap the DMA
 * buffers appropriate to the descriptor mode (RxD1 vs RxD3), clear the
 * descriptor, free the skb and decrement rx_bufs_left.
 * NOTE(review): extraction gaps hide braces, the j/rxdp/ba/rxdp1/rxdp3
 * declarations, the NULL-skb continue path, the unmap size/address
 * arguments and the dev_kfree_skb() call; code left byte-identical.
 */
2578 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2580 struct net_device *dev = sp->dev;
2582 struct sk_buff *skb;
2584 struct mac_info *mac_control;
2589 mac_control = &sp->mac_control;
2590 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2591 rxdp = mac_control->rings[ring_no].
2592 rx_blocks[blk].rxds[j].virt_addr;
/* Host_Control stores the skb pointer stashed by fill_rx_buffers(). */
2593 skb = (struct sk_buff *)
2594 ((unsigned long) rxdp->Host_Control);
2598 if (sp->rxd_mode == RXD_MODE_1) {
2599 rxdp1 = (struct RxD1*)rxdp;
2600 pci_unmap_single(sp->pdev, (dma_addr_t)
2603 HEADER_ETHERNET_II_802_3_SIZE
2604 + HEADER_802_2_SIZE +
2606 PCI_DMA_FROMDEVICE);
2607 memset(rxdp, 0, sizeof(struct RxD1));
2608 } else if(sp->rxd_mode == RXD_MODE_3B) {
2609 rxdp3 = (struct RxD3*)rxdp;
2610 ba = &mac_control->rings[ring_no].
/* Three unmaps: Buffer0 (header), Buffer1, Buffer2 (payload). */
2612 pci_unmap_single(sp->pdev, (dma_addr_t)
2615 PCI_DMA_FROMDEVICE);
2616 pci_unmap_single(sp->pdev, (dma_addr_t)
2619 PCI_DMA_FROMDEVICE);
2620 pci_unmap_single(sp->pdev, (dma_addr_t)
2623 PCI_DMA_FROMDEVICE);
2624 memset(rxdp, 0, sizeof(struct RxD3));
2626 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2628 atomic_dec(&sp->rx_bufs_left[ring_no]);
2633 * free_rx_buffers - Frees all Rx buffers
2634 * @sp: device private variable.
2636 * This function will free all Rx buffers allocated by host.
/* Free all Rx buffers on every configured ring via free_rxd_blk(), then
 * reset the per-ring get/put cursors and the rx_bufs_left counters.
 * NOTE(review): extraction gaps hide the opening/closing braces; code is
 * left byte-identical.
 */
2641 static void free_rx_buffers(struct s2io_nic *sp)
2643 struct net_device *dev = sp->dev;
2644 int i, blk = 0, buf_cnt = 0;
2645 struct mac_info *mac_control;
2646 struct config_param *config;
2648 mac_control = &sp->mac_control;
2649 config = &sp->config;
2651 for (i = 0; i < config->rx_ring_num; i++) {
2652 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2653 free_rxd_blk(sp,i,blk);
/* Rewind both cursors so the ring starts clean on the next open. */
2655 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2656 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2657 mac_control->rings[i].rx_curr_put_info.offset = 0;
2658 mac_control->rings[i].rx_curr_get_info.offset = 0;
2659 atomic_set(&sp->rx_bufs_left[i], 0);
/* NOTE(review): buf_cnt appears to never be updated in the visible
 * lines, so this prints 0; the updating line may be lost to extraction. */
2660 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2661 dev->name, buf_cnt, i);
2666 * s2io_poll - Rx interrupt handler for NAPI support
2667 * @napi : pointer to the napi structure.
2668 * @budget : The number of packets that were budgeted to be processed
2669 * during one pass through the 'Poll' function.
2671 * Comes into picture only if NAPI support has been incorporated. It does
2672 * the same thing that rx_intr_handler does, but not in a interrupt context
2673 * also It will process only a given number of packets.
2675 * 0 on success and 1 if there are No Rx packets to be processed.
/* NAPI poll callback: ack Rx traffic interrupts, drain each ring via
 * rx_intr_handler() up to 'budget' packets, refill the rings, and when the
 * quota was not exhausted complete NAPI and unmask Rx interrupts.
 * NOTE(review): extraction gaps hide braces, the 'i' declaration, the
 * goto/return statements and the label separating the budget-exhausted
 * path (second refill loop) from the completion path; code left
 * byte-identical.
 */
2678 static int s2io_poll(struct napi_struct *napi, int budget)
2680 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2681 struct net_device *dev = nic->dev;
2682 int pkt_cnt = 0, org_pkts_to_process;
2683 struct mac_info *mac_control;
2684 struct config_param *config;
2685 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2688 atomic_inc(&nic->isr_cnt);
2689 mac_control = &nic->mac_control;
2690 config = &nic->config;
2692 nic->pkts_to_process = budget;
2693 org_pkts_to_process = nic->pkts_to_process;
/* Write-1-to-clear the Rx interrupt, read back to flush the PCI write. */
2695 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2696 readl(&bar0->rx_traffic_int);
2698 for (i = 0; i < config->rx_ring_num; i++) {
2699 rx_intr_handler(&mac_control->rings[i]);
2700 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2701 if (!nic->pkts_to_process) {
2702 /* Quota for the current iteration has been met */
2707 netif_rx_complete(dev, napi);
2709 for (i = 0; i < config->rx_ring_num; i++) {
2710 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2711 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2712 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2716 /* Re enable the Rx interrupts. */
2717 writeq(0x0, &bar0->rx_traffic_mask);
2718 readl(&bar0->rx_traffic_mask);
2719 atomic_dec(&nic->isr_cnt);
/* Budget-exhausted path: refill but leave Rx interrupts masked so NAPI
 * gets polled again. */
2723 for (i = 0; i < config->rx_ring_num; i++) {
2724 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2725 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2726 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2730 atomic_dec(&nic->isr_cnt);
2734 #ifdef CONFIG_NET_POLL_CONTROLLER
2736 * s2io_netpoll - netpoll event handler entry point
2737 * @dev : pointer to the device structure.
2739 * This function will be called by upper layer to check for events on the
2740 * interface in situations where interrupts are disabled. It is used for
2741 * specific in-kernel networking tasks, such as remote consoles and kernel
2742 * debugging over the network (example netdump in RedHat).
/* Netpoll entry point (interrupts disabled): ack all Tx/Rx interrupts,
 * reap completed Tx descriptors so netpoll clients (netconsole/netdump)
 * do not starve for skbs, process received frames and refill the rings.
 * Runs with the device IRQ disabled for the duration.
 * NOTE(review): extraction gaps hide braces and the early-return after the
 * pci_channel_offline() check; code left byte-identical.
 */
2744 static void s2io_netpoll(struct net_device *dev)
2746 struct s2io_nic *nic = dev->priv;
2747 struct mac_info *mac_control;
2748 struct config_param *config;
2749 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2750 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
/* Bail if the PCI channel is in an error state (AER/EEH). */
2753 if (pci_channel_offline(nic->pdev))
2756 disable_irq(dev->irq);
2758 atomic_inc(&nic->isr_cnt);
2759 mac_control = &nic->mac_control;
2760 config = &nic->config;
/* Write-1-to-clear all pending Rx and Tx traffic interrupt bits. */
2762 writeq(val64, &bar0->rx_traffic_int);
2763 writeq(val64, &bar0->tx_traffic_int);
2765 /* we need to free up the transmitted skbufs or else netpoll will
2766 * run out of skbs and will fail and eventually netpoll application such
2767 * as netdump will fail.
2769 for (i = 0; i < config->tx_fifo_num; i++)
2770 tx_intr_handler(&mac_control->fifos[i]);
2772 /* check for received packet and indicate up to network */
2773 for (i = 0; i < config->rx_ring_num; i++)
2774 rx_intr_handler(&mac_control->rings[i]);
2776 for (i = 0; i < config->rx_ring_num; i++) {
2777 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2778 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2779 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2783 atomic_dec(&nic->isr_cnt);
2784 enable_irq(dev->irq);
2790 * rx_intr_handler - Rx interrupt handler
2791 * @nic: device private variable.
2793 * If the interrupt is because of a received frame or if the
2794 * receive ring contains fresh as yet un-processed frames,this function is
2795 * called. It picks out the RxD at which place the last Rx processing had
2796 * stopped and sends the skb to the OSM's Rx handler and then increments
/* Drain one Rx ring: starting from the saved get cursor, walk descriptors
 * the NIC has handed back (RXD_IS_UP2DT), unmap/sync their buffers, pass
 * each skb to rx_osm_handler(), and advance the get cursor with wraparound.
 * Stops early on ring-full, NAPI quota exhaustion, or indicate_max_pkts.
 * Flushes any pending LRO sessions before returning.
 * NOTE(review): extraction gaps hide braces, several declarations
 * (rxdp/rxdp1/rxdp3/i/pkt_cnt and the napi flag), the config gating the
 * put_lock variant, and loop-increment lines; code left byte-identical.
 */
2801 static void rx_intr_handler(struct ring_info *ring_data)
2803 struct s2io_nic *nic = ring_data->nic;
2804 struct net_device *dev = (struct net_device *) nic->dev;
2805 int get_block, put_block, put_offset;
2806 struct rx_curr_get_info get_info, put_info;
2808 struct sk_buff *skb;
/* rx_lock serialises against the card being brought down mid-processing. */
2814 spin_lock(&nic->rx_lock);
2815 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2816 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2817 __FUNCTION__, dev->name);
2818 spin_unlock(&nic->rx_lock);
2822 get_info = ring_data->rx_curr_get_info;
2823 get_block = get_info.block_index;
2824 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2825 put_block = put_info.block_index;
2826 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
/* Snapshot put_pos; the locked and unlocked variants are presumably
 * selected by a config missing from this extraction. */
2828 spin_lock(&nic->put_lock);
2829 put_offset = ring_data->put_pos;
2830 spin_unlock(&nic->put_lock);
2832 put_offset = ring_data->put_pos;
2834 while (RXD_IS_UP2DT(rxdp)) {
2836 * If you are next to put index then it's
2837 * FIFO full condition
2839 if ((get_block == put_block) &&
2840 (get_info.offset + 1) == put_info.offset) {
2841 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2844 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
/* A NULL skb here means the descriptor/bookkeeping is corrupt; abort. */
2846 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2848 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2849 spin_unlock(&nic->rx_lock);
2852 if (nic->rxd_mode == RXD_MODE_1) {
2853 rxdp1 = (struct RxD1*)rxdp;
2854 pci_unmap_single(nic->pdev, (dma_addr_t)
2857 HEADER_ETHERNET_II_802_3_SIZE +
2860 PCI_DMA_FROMDEVICE);
2861 } else if (nic->rxd_mode == RXD_MODE_3B) {
/* 3B mode: sync the header buffer for CPU access, unmap the payload. */
2862 rxdp3 = (struct RxD3*)rxdp;
2863 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2865 BUF0_LEN, PCI_DMA_FROMDEVICE);
2866 pci_unmap_single(nic->pdev, (dma_addr_t)
2869 PCI_DMA_FROMDEVICE);
2871 prefetch(skb->data);
2872 rx_osm_handler(ring_data, rxdp);
2874 ring_data->rx_curr_get_info.offset = get_info.offset;
2875 rxdp = ring_data->rx_blocks[get_block].
2876 rxds[get_info.offset].virt_addr;
/* Wrap to the next block (and back to block 0 at ring end). */
2877 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2878 get_info.offset = 0;
2879 ring_data->rx_curr_get_info.offset = get_info.offset;
2881 if (get_block == ring_data->block_count)
2883 ring_data->rx_curr_get_info.block_index = get_block;
2884 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2887 nic->pkts_to_process -= 1;
2888 if ((napi) && (!nic->pkts_to_process))
2891 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2895 /* Clear all LRO sessions before exiting */
2896 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2897 struct lro *lro = &nic->lro0_n[i];
2899 update_L3L4_header(nic, lro);
2900 queue_rx_frame(lro->parent);
2901 clear_lro_session(lro);
2906 spin_unlock(&nic->rx_lock);
2910 * tx_intr_handler - Transmit interrupt handler
2911 * @nic : device private variable
2913 * If an interrupt was raised to indicate DMA complete of the
2914 * Tx packet, this function is called. It identifies the last TxD
2915 * whose buffer was freed and frees all skbs whose data have already
2916 * DMA'ed into the NICs internal memory.
/* Reap one Tx FIFO: walk from the get cursor over descriptors no longer
 * owned by the NIC, classify any TxD error t-code into the sw_stat
 * counters, free each completed skb (updating tx_bytes/mem_freed), advance
 * the cursor, and wake the netif queue if it was stopped.
 * NOTE(review): extraction gaps hide braces, the txdlp/err_mask
 * declarations, the switch statement around the t-code cases, and some
 * error-handling lines (e.g. the abort-case recovery); code left
 * byte-identical.
 */
2921 static void tx_intr_handler(struct fifo_info *fifo_data)
2923 struct s2io_nic *nic = fifo_data->nic;
2924 struct net_device *dev = (struct net_device *) nic->dev;
2925 struct tx_curr_get_info get_info, put_info;
2926 struct sk_buff *skb;
2930 get_info = fifo_data->tx_curr_get_info;
2931 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2932 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
/* Stop at the first descriptor still owned by the NIC, at the put
 * cursor, or at an empty slot. */
2934 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2935 (get_info.offset != put_info.offset) &&
2936 (txdlp->Host_Control)) {
2937 /* Check for TxD errors */
2938 if (txdlp->Control_1 & TXD_T_CODE) {
2939 unsigned long long err;
2940 err = txdlp->Control_1 & TXD_T_CODE;
2942 nic->mac_control.stats_info->sw_stat.
2946 /* update t_code statistics */
2947 err_mask = err >> 48;
2950 nic->mac_control.stats_info->sw_stat.
2955 nic->mac_control.stats_info->sw_stat.
2956 tx_desc_abort_cnt++;
2960 nic->mac_control.stats_info->sw_stat.
2961 tx_parity_err_cnt++;
2965 nic->mac_control.stats_info->sw_stat.
2970 nic->mac_control.stats_info->sw_stat.
2971 tx_list_proc_err_cnt++;
2976 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2978 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2980 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2984 /* Updating the statistics block */
2985 nic->stats.tx_bytes += skb->len;
2986 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2987 dev_kfree_skb_irq(skb);
/* Advance and wrap the get cursor. */
2990 if (get_info.offset == get_info.fifo_len + 1)
2991 get_info.offset = 0;
2992 txdlp = (struct TxD *) fifo_data->list_info
2993 [get_info.offset].list_virt_addr;
2994 fifo_data->tx_curr_get_info.offset =
/* Descriptors were reclaimed: restart the queue if it was flow-stopped. */
2998 spin_lock(&nic->tx_lock);
2999 if (netif_queue_stopped(dev))
3000 netif_wake_queue(dev);
3001 spin_unlock(&nic->tx_lock);
3005 * s2io_mdio_write - Function to write in to MDIO registers
3006 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3007 * @addr : address value
3008 * @value : data value
3009 * @dev : pointer to net_device structure
3011 * This function is used to write values to the MDIO registers
/* Write 'value' to MDIO register 'addr' of device 'mmd_type' via the
 * mdio_control register, in three phases: address transaction, write-data
 * transaction, then a read-back transaction.  Each phase is armed by
 * setting MDIO_CTRL_START_TRANS(0xE).
 * NOTE(review): extraction gaps hide braces, the val64 declaration/reset
 * between phases and the udelay/poll lines between transactions; code left
 * byte-identical.
 */
3014 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3017 struct s2io_nic *sp = dev->priv;
3018 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3020 //address transaction
3021 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3022 | MDIO_MMD_DEV_ADDR(mmd_type)
3023 | MDIO_MMS_PRT_ADDR(0x0);
3024 writeq(val64, &bar0->mdio_control);
3025 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3026 writeq(val64, &bar0->mdio_control);
/* Data (write) transaction. */
3031 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3032 | MDIO_MMD_DEV_ADDR(mmd_type)
3033 | MDIO_MMS_PRT_ADDR(0x0)
3034 | MDIO_MDIO_DATA(value)
3035 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3036 writeq(val64, &bar0->mdio_control);
3037 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3038 writeq(val64, &bar0->mdio_control);
/* Read-back transaction (result is not returned by this function). */
3042 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3043 | MDIO_MMD_DEV_ADDR(mmd_type)
3044 | MDIO_MMS_PRT_ADDR(0x0)
3045 | MDIO_OP(MDIO_OP_READ_TRANS);
3046 writeq(val64, &bar0->mdio_control);
3047 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3048 writeq(val64, &bar0->mdio_control);
3054 * s2io_mdio_read - Function to read from the MDIO registers
3055 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3056 * @addr : address value
3057 * @dev : pointer to net_device structure
3059 * This function is used to read values from the MDIO registers
/* Read MDIO register 'addr' of device 'mmd_type': issue an address
 * transaction, then a read transaction, then extract the 16-bit data field
 * (bits 31:16) from mdio_control.  Returns the 16-bit value in a u64.
 * NOTE(review): extraction gaps hide braces, the val64/rval64 declarations,
 * inter-phase delays and the return statement; code left byte-identical.
 */
3062 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3066 struct s2io_nic *sp = dev->priv;
3067 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3069 /* address transaction */
3070 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3071 | MDIO_MMD_DEV_ADDR(mmd_type)
3072 | MDIO_MMS_PRT_ADDR(0x0);
3073 writeq(val64, &bar0->mdio_control);
3074 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3075 writeq(val64, &bar0->mdio_control);
3078 /* Data transaction */
3080 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3081 | MDIO_MMD_DEV_ADDR(mmd_type)
3082 | MDIO_MMS_PRT_ADDR(0x0)
3083 | MDIO_OP(MDIO_OP_READ_TRANS);
3084 writeq(val64, &bar0->mdio_control);
3085 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3086 writeq(val64, &bar0->mdio_control);
3089 /* Read the value from regs */
/* Data field occupies bits 31:16 of mdio_control. */
3090 rval64 = readq(&bar0->mdio_control);
3091 rval64 = rval64 & 0xFFFF0000;
3092 rval64 = rval64 >> 16;
3096 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3097 * @counter : counter value to be updated
3098 * @flag : flag to indicate the status
3099 * @type : counter type
3101 * This function is to check the status of the xpak counters value
/* Maintain one XPAK alarm counter: when 'flag' is set, increment *counter
 * and a 2-bit saturating state field inside *regs_stat (selected by
 * 'index'); once the field saturates, print a severity warning selected by
 * 'type' (1 = temperature, 2 = bias current, 3 = laser output power,
 * per the visible messages).  When 'flag' is clear, the 2-bit field is
 * reset.
 * NOTE(review): extraction gaps hide braces, the mask/val64/i declarations,
 * the mask-building loop body, the if/else and switch skeleton and the
 * tails of the DBG_PRINT strings; code left byte-identical.
 */
3105 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3110 for(i = 0; i <index; i++)
3115 *counter = *counter + 1;
/* Extract the 2-bit state field for this index. */
3116 val64 = *regs_stat & mask;
3117 val64 = val64 >> (index * 0x2);
3124 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3125 "service. Excessive temperatures may "
3126 "result in premature transceiver "
3130 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3131 "service Excessive bias currents may "
3132 "indicate imminent laser diode "
3136 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3137 "service Excessive laser output "
3138 "power may saturate far-end "
3142 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
/* Write the updated 2-bit field back into *regs_stat. */
3147 val64 = val64 << (index * 0x2);
3148 *regs_stat = (*regs_stat & (~mask)) | (val64);
/* flag clear: reset this index's state bits. */
3151 *regs_stat = *regs_stat & (~mask);
3156 * s2io_updt_xpak_counter - Function to update the xpak counters
3157 * @dev : pointer to net_device struct
3159 * This function is to update the status of the xpak counters value
/* Poll the XPAK transceiver over MDIO: verify slave access and the PMA ID,
 * load the DOM page, then read the alarm and warning flag registers and
 * bump the matching xpak_stat counters (temperature, laser bias current,
 * laser output power - high via s2io_chk_xpak_counter(), low directly).
 * NOTE(review): extraction gaps hide braces, the val64/val16/addr/flag
 * declarations, the addr assignments before each MDIO access, the early
 * returns and the s2io_chk_xpak_counter() index/type arguments; code left
 * byte-identical.
 */
3162 static void s2io_updt_xpak_counter(struct net_device *dev)
3170 struct s2io_nic *sp = dev->priv;
3171 struct stat_block *stat_info = sp->mac_control.stats_info;
3173 /* Check the communication with the MDIO slave */
3176 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* All-ones/all-zeros reads indicate a dead MDIO bus. */
3177 if((val64 == 0xFFFF) || (val64 == 0x0000))
3179 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3180 "Returned %llx\n", (unsigned long long)val64);
3184 /* Check for the expected value of 2040 at PMA address 0x0000 */
3187 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3188 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3189 (unsigned long long)val64);
3193 /* Loading the DOM register to MDIO register */
3195 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3196 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3198 /* Reading the Alarm flags */
3201 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3203 flag = CHECKBIT(val64, 0x7);
3205 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3206 &stat_info->xpak_stat.xpak_regs_stat,
3209 if(CHECKBIT(val64, 0x6))
3210 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3212 flag = CHECKBIT(val64, 0x3);
3214 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3215 &stat_info->xpak_stat.xpak_regs_stat,
3218 if(CHECKBIT(val64, 0x2))
3219 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3221 flag = CHECKBIT(val64, 0x1);
3223 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3224 &stat_info->xpak_stat.xpak_regs_stat,
3227 if(CHECKBIT(val64, 0x0))
3228 stat_info->xpak_stat.alarm_laser_output_power_low++;
3230 /* Reading the Warning flags */
3233 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Warnings are counted directly without the saturating-state machinery. */
3235 if(CHECKBIT(val64, 0x7))
3236 stat_info->xpak_stat.warn_transceiver_temp_high++;
3238 if(CHECKBIT(val64, 0x6))
3239 stat_info->xpak_stat.warn_transceiver_temp_low++;
3241 if(CHECKBIT(val64, 0x3))
3242 stat_info->xpak_stat.warn_laser_bias_current_high++;
3244 if(CHECKBIT(val64, 0x2))
3245 stat_info->xpak_stat.warn_laser_bias_current_low++;
3247 if(CHECKBIT(val64, 0x1))
3248 stat_info->xpak_stat.warn_laser_output_power_high++;
3250 if(CHECKBIT(val64, 0x0))
3251 stat_info->xpak_stat.warn_laser_output_power_low++;
3255 * alarm_intr_handler - Alarm Interrupt handler
3256 * @nic: device private variable
3257 * Description: If the interrupt was neither because of Rx packet or Tx
3258 * complete, this function is called. If the interrupt was to indicate
3259 * a loss of link, the OSM link status handler is invoked for any other
3260 * alarm interrupt the block that raised the interrupt is displayed
3261 * and a H/W reset is issued.
/* Central alarm handler: every invocation ticks the hourly XPAK timer,
 * then services, in order: link-state-change interrupts, memory-controller
 * ECC errors (resetting Xframe-I only on critical double errors), serious
 * errors (full reset), PCC_FB_ECC double errors (adapter recycle per
 * errata), GPIO data-parity errors, and - on Xframe-II - the ring bump
 * (ring full) counters.
 * NOTE(review): extraction gaps hide braces, the i/cnt declarations, the
 * early returns after the card-down/pci-offline checks and several
 * statement tails; code left byte-identical.
 */
3266 static void alarm_intr_handler(struct s2io_nic *nic)
3268 struct net_device *dev = (struct net_device *) nic->dev;
3269 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3270 register u64 val64 = 0, err_reg = 0;
3273 if (atomic_read(&nic->card_state) == CARD_DOWN)
3275 if (pci_channel_offline(nic->pdev))
3277 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3278 /* Handling the XPAK counters update */
3279 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3280 /* waiting for an hour */
3281 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3283 s2io_updt_xpak_counter(dev);
3284 /* reset the count to zero */
3285 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3288 /* Handling link status change error Intr */
3289 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
/* Write-1-to-clear the RMAC error register after reading it. */
3290 err_reg = readq(&bar0->mac_rmac_err_reg);
3291 writeq(err_reg, &bar0->mac_rmac_err_reg);
3292 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3293 schedule_work(&nic->set_link_task);
3297 /* Handling Ecc errors */
3298 val64 = readq(&bar0->mc_err_reg);
3299 writeq(val64, &bar0->mc_err_reg);
3300 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3301 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3302 nic->mac_control.stats_info->sw_stat.
3304 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3306 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3307 if (nic->device_type != XFRAME_II_DEVICE) {
3308 /* Reset XframeI only if critical error */
3309 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3310 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3311 netif_stop_queue(dev);
3312 schedule_work(&nic->rst_timer_task);
3313 nic->mac_control.stats_info->sw_stat.
3318 nic->mac_control.stats_info->sw_stat.
3323 /* In case of a serious error, the device will be Reset. */
3324 val64 = readq(&bar0->serr_source);
3325 if (val64 & SERR_SOURCE_ANY) {
3326 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3327 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3328 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3329 (unsigned long long)val64);
3330 netif_stop_queue(dev);
3331 schedule_work(&nic->rst_timer_task);
3332 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3336 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3337 * Error occurs, the adapter will be recycled by disabling the
3338 * adapter enable bit and enabling it again after the device
3339 * becomes Quiescent.
3341 val64 = readq(&bar0->pcc_err_reg);
3342 writeq(val64, &bar0->pcc_err_reg);
3343 if (val64 & PCC_FB_ECC_DB_ERR) {
3344 u64 ac = readq(&bar0->adapter_control);
3345 ac &= ~(ADAPTER_CNTL_EN);
3346 writeq(ac, &bar0->adapter_control);
/* Read-back flushes the disable before set_link_task re-enables. */
3347 ac = readq(&bar0->adapter_control);
3348 schedule_work(&nic->set_link_task);
3350 /* Check for data parity error */
3351 val64 = readq(&bar0->pic_int_status);
3352 if (val64 & PIC_INT_GPIO) {
3353 val64 = readq(&bar0->gpio_int_reg);
3354 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3355 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3356 schedule_work(&nic->rst_timer_task);
3357 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3361 /* Check for ring full counter */
3362 if (nic->device_type & XFRAME_II_DEVICE) {
/* Each bump counter register packs four 16-bit per-ring counters. */
3363 val64 = readq(&bar0->ring_bump_counter1);
3364 for (i=0; i<4; i++) {
3365 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3366 cnt >>= 64 - ((i+1)*16);
3367 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3371 val64 = readq(&bar0->ring_bump_counter2);
3372 for (i=0; i<4; i++) {
3373 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3374 cnt >>= 64 - ((i+1)*16);
3375 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3380 /* Other type of interrupts are not being handled now, TODO */
3384 * wait_for_cmd_complete - waits for a command to complete.
3385 * @sp : private member of the device structure, which is a pointer to the
3386 * s2io_nic structure.
3387 * Description: Function that waits for a command to Write into RMAC
3388 * ADDR DATA registers to be completed and returns either success or
3389 * error depending on whether the command was complete or not.
3391 * SUCCESS on success and FAILURE on failure.
/* Poll 'addr' until 'busy_bit' reaches the requested bit_state
 * (S2IO_BIT_RESET waits for the bit to clear; S2IO_BIT_SET presumably
 * waits for it to set).  Returns SUCCESS or FAILURE.
 * NOTE(review): this function is heavily truncated by the extraction -
 * the parameter list tail (bit_state), the val64 declaration, the polling
 * loop with its delay/retry logic and the return are all missing; code
 * left byte-identical.
 */
3394 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3397 int ret = FAILURE, cnt = 0, delay = 1;
/* Reject any bit_state other than the two supported values. */
3400 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3404 val64 = readq(addr);
3405 if (bit_state == S2IO_BIT_RESET) {
3406 if (!(val64 & busy_bit)) {
3411 if (!(val64 & busy_bit)) {
3428 * check_pci_device_id - Checks if the device id is supported
3430 * Description: Function to check if the pci device id is supported by driver.
3431 * Return value: Actual device id if supported else PCI_ANY_ID
/* Map a PCI device id to the driver's device class: Herc ids -> Xframe-II,
 * S2IO ids -> Xframe-I; per the kernel-doc above, unsupported ids yield
 * PCI_ANY_ID (the default case is lost to the extraction, along with the
 * switch skeleton and braces; code left byte-identical).
 */
3433 static u16 check_pci_device_id(u16 id)
3436 case PCI_DEVICE_ID_HERC_WIN:
3437 case PCI_DEVICE_ID_HERC_UNI:
3438 return XFRAME_II_DEVICE;
3439 case PCI_DEVICE_ID_S2IO_UNI:
3440 case PCI_DEVICE_ID_S2IO_WIN:
3441 return XFRAME_I_DEVICE;
3448 * s2io_reset - Resets the card.
3449 * @sp : private member of the device structure.
3450 * Description: Function to Reset the card. This function then also
3451 * restores the previously saved PCI configuration space registers as
3452 * the card reset also resets the configuration space.
/* Full card reset: back up PCIX_COMMAND, hit sw_reset, re-restore the PCI
 * config space until the device id reads back sane, re-program the
 * swapper and MSI-X table, clear stale PCI/PCI-X error state (Xframe-II),
 * zero the stats block while preserving link/reset/memory/watchdog
 * counters, apply the SXE-002 LED workaround, clear spurious post-reset
 * ECC interrupts, and restore the MAC address.
 * NOTE(review): extraction gaps hide braces, the i/subid/val16/pci_cmd
 * declarations, the CX4-specific msleep and the per-iteration delay; code
 * left byte-identical.
 */
3457 static void s2io_reset(struct s2io_nic * sp)
3459 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3464 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3465 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3467 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3468 __FUNCTION__, sp->dev->name);
3470 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3471 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3473 val64 = SW_RESET_ALL;
3474 writeq(val64, &bar0->sw_reset);
/* CX4 boards need extra settle time after reset (delay line missing
 * from this extraction). */
3475 if (strstr(sp->product_name, "CX4")) {
3479 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3481 /* Restore the PCI state saved during initialization. */
3482 pci_restore_state(sp->pdev);
/* Reset is complete once the device id register reads back a
 * supported id rather than garbage. */
3483 pci_read_config_word(sp->pdev, 0x2, &val16);
3484 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3489 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3490 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3493 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3497 /* Set swapper to enable I/O register access */
3498 s2io_set_swapper(sp);
3500 /* Restore the MSIX table entries from local variables */
3501 restore_xmsi_data(sp);
3503 /* Clear certain PCI/PCI-X fields after reset */
3504 if (sp->device_type == XFRAME_II_DEVICE) {
3505 /* Clear "detected parity error" bit */
3506 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3508 /* Clearing PCIX Ecc status register */
3509 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3511 /* Clearing PCI_STATUS error reflected here */
3512 writeq(BIT(62), &bar0->txpic_int_reg);
3515 /* Reset device statistics maintained by OS */
3516 memset(&sp->stats, 0, sizeof (struct net_device_stats));
/* Snapshot the counters that must survive the stats-block wipe. */
3518 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3519 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3520 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3521 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3522 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3523 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3524 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3525 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3526 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3527 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3528 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3529 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3530 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3531 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3532 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3533 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3534 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3535 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3536 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3538 /* SXE-002: Configure link and activity LED to turn it off */
3539 subid = sp->pdev->subsystem_device;
3540 if (((subid & 0xFF) >= 0x07) &&
3541 (sp->device_type == XFRAME_I_DEVICE)) {
3542 val64 = readq(&bar0->gpio_control);
3543 val64 |= 0x0000800000000000ULL;
3544 writeq(val64, &bar0->gpio_control);
3545 val64 = 0x0411040400000000ULL;
3546 writeq(val64, (void __iomem *)bar0 + 0x2700);
3550 * Clear spurious ECC interrupts that would have occured on
3551 * XFRAME II cards after reset.
3553 if (sp->device_type == XFRAME_II_DEVICE) {
3554 val64 = readq(&bar0->pcc_err_reg);
3555 writeq(val64, &bar0->pcc_err_reg);
3558 /* restore the previously assigned mac address */
3559 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3561 sp->device_enabled_once = FALSE;
3565 * s2io_set_swapper - to set the swapper control on the card
3566 * @sp : private member of the device structure,
3567 * pointer to the s2io_nic structure.
3568 * Description: Function to set the swapper control on the card
3569 * correctly depending on the 'endianness' of the system.
3571 * SUCCESS on success and FAILURE on failure.
/*
 * s2io_set_swapper - program the byte-swapper so host/NIC agree on endianness.
 * Strategy visible here: read the PIF feedback register, which returns a
 * known pattern (0x0123456789ABCDEF) when the swapper is set correctly; if
 * it does not match, try a table of candidate swapper_ctrl values until the
 * feedback reads back correctly.  The xmsi_address register is then used as
 * a scratch write/read-back check for the write path.
 * (Partial listing: loop headers, returns and some branches are elided.)
 */
3574 static int s2io_set_swapper(struct s2io_nic * sp)
3576 struct net_device *dev = sp->dev;
3577 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3578 u64 val64, valt, valr;
3581 * Set proper endian settings and verify the same by reading
3582 * the PIF Feed-back register.
/* Feedback reads as the magic pattern only when read-path swap is right. */
3585 val64 = readq(&bar0->pif_rd_swapper_fb);
3586 if (val64 != 0x0123456789ABCDEFULL) {
/* Candidate swapper settings, tried in order until feedback matches. */
3588 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3589 0x8100008181000081ULL, /* FE=1, SE=0 */
3590 0x4200004242000042ULL, /* FE=0, SE=1 */
3591 0}; /* FE=0, SE=0 */
3594 writeq(value[i], &bar0->swapper_ctrl);
3595 val64 = readq(&bar0->pif_rd_swapper_fb);
3596 if (val64 == 0x0123456789ABCDEFULL)
3601 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3603 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3604 (unsigned long long) val64);
/* Preserve the read-path bits found above while probing the write path. */
3609 valr = readq(&bar0->swapper_ctrl);
/* Write-path check: write the magic pattern to xmsi_address and read back. */
3612 valt = 0x0123456789ABCDEFULL;
3613 writeq(valt, &bar0->xmsi_address);
3614 val64 = readq(&bar0->xmsi_address);
/* Write-path candidates, OR'ed with the read-path bits already found. */
3618 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3619 0x0081810000818100ULL, /* FE=1, SE=0 */
3620 0x0042420000424200ULL, /* FE=0, SE=1 */
3621 0}; /* FE=0, SE=0 */
3624 writeq((value[i] | valr), &bar0->swapper_ctrl);
3625 writeq(valt, &bar0->xmsi_address);
3626 val64 = readq(&bar0->xmsi_address);
3632 unsigned long long x = val64;
3633 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3634 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Keep only the top 16 swap-setting bits; rebuild the rest below. */
3638 val64 = readq(&bar0->swapper_ctrl);
3639 val64 &= 0xFFFF000000000000ULL;
3643 * The device by default set to a big endian format, so a
3644 * big endian driver need not set anything.
3646 val64 |= (SWAPPER_CTRL_TXP_FE |
3647 SWAPPER_CTRL_TXP_SE |
3648 SWAPPER_CTRL_TXD_R_FE |
3649 SWAPPER_CTRL_TXD_W_FE |
3650 SWAPPER_CTRL_TXF_R_FE |
3651 SWAPPER_CTRL_RXD_R_FE |
3652 SWAPPER_CTRL_RXD_W_FE |
3653 SWAPPER_CTRL_RXF_W_FE |
3654 SWAPPER_CTRL_XMSI_FE |
3655 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
/* XMSI swap-enable only applies when using INTA (legacy interrupts). */
3656 if (sp->intr_type == INTA)
3657 val64 |= SWAPPER_CTRL_XMSI_SE;
3658 writeq(val64, &bar0->swapper_ctrl);
3661 * Initially we enable all bits to make it accessible by the
3662 * driver, then we selectively enable only those bits that
/* Alternate (little-endian host) path: enable both FE and SE bits. */
3665 val64 |= (SWAPPER_CTRL_TXP_FE |
3666 SWAPPER_CTRL_TXP_SE |
3667 SWAPPER_CTRL_TXD_R_FE |
3668 SWAPPER_CTRL_TXD_R_SE |
3669 SWAPPER_CTRL_TXD_W_FE |
3670 SWAPPER_CTRL_TXD_W_SE |
3671 SWAPPER_CTRL_TXF_R_FE |
3672 SWAPPER_CTRL_RXD_R_FE |
3673 SWAPPER_CTRL_RXD_R_SE |
3674 SWAPPER_CTRL_RXD_W_FE |
3675 SWAPPER_CTRL_RXD_W_SE |
3676 SWAPPER_CTRL_RXF_W_FE |
3677 SWAPPER_CTRL_XMSI_FE |
3678 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3679 if (sp->intr_type == INTA)
3680 val64 |= SWAPPER_CTRL_XMSI_SE;
3681 writeq(val64, &bar0->swapper_ctrl);
3683 val64 = readq(&bar0->swapper_ctrl);
3686 * Verifying if endian settings are accurate by reading a
3687 * feedback register.
3689 val64 = readq(&bar0->pif_rd_swapper_fb);
3690 if (val64 != 0x0123456789ABCDEFULL) {
3691 /* Endian settings are incorrect, calls for another dekko. */
3692 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3694 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3695 (unsigned long long) val64);
/*
 * wait_for_msix_trans - poll until an XMSI access command completes.
 * @nic: device private structure.
 * @i:   XMSI vector index, used only in the failure message.
 * Polls bar0->xmsi_access for BIT(15) (command-pending strobe) to clear;
 * logs a failure for vector @i if it never does.
 * (Partial listing: the polling loop body and return are elided.)
 */
3702 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3704 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3706 int ret = 0, cnt = 0;
3709 val64 = readq(&bar0->xmsi_access);
/* BIT(15) cleared means the hardware has consumed the access command. */
3710 if (!(val64 & BIT(15)))
3716 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
/*
 * restore_xmsi_data - write previously saved MSI-X address/data pairs
 * back into the NIC (used after a reset; see store_xmsi_data for the
 * matching save path).  Each entry is pushed via the xmsi_access
 * command register and waited on with wait_for_msix_trans().
 */
3723 static void restore_xmsi_data(struct s2io_nic *nic)
3725 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3729 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3730 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3731 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
/* BIT(7) = write command, BIT(15) = strobe, vBIT(i,26,6) = vector index. */
3732 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3733 writeq(val64, &bar0->xmsi_access);
3734 if (wait_for_msix_trans(nic, i)) {
3735 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
/*
 * store_xmsi_data - read the MSI-X address/data pair for each vector out
 * of the NIC and cache it in nic->msix_info[], so restore_xmsi_data()
 * can reprogram the hardware after a reset.
 */
3741 static void store_xmsi_data(struct s2io_nic *nic)
3743 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3744 u64 val64, addr, data;
3747 /* Store and display */
3748 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
/* BIT(15) = strobe (read command: BIT(7) absent), vBIT(i,26,6) = vector. */
3749 val64 = (BIT(15) | vBIT(i, 26, 6));
3750 writeq(val64, &bar0->xmsi_access);
3751 if (wait_for_msix_trans(nic, i)) {
3752 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3755 addr = readq(&bar0->xmsi_address);
3756 data = readq(&bar0->xmsi_data);
/* Cache only meaningful pairs (guard condition elided in this listing). */
3758 nic->msix_info[i].addr = addr;
3759 nic->msix_info[i].data = data;
/*
 * s2io_enable_msi_x - allocate MSI-X bookkeeping, program the Tx/Rx
 * interrupt steering registers (tx_mat/rx_mat), and enable MSI-X on the
 * PCI device.
 * Vector 0 is skipped (msix_indx starts at 1) — presumably reserved for
 * alarm/general interrupts; TODO confirm against the ISR setup code.
 * Memory accounting for every alloc/free is mirrored into sw_stat.
 */
3764 static int s2io_enable_msi_x(struct s2io_nic *nic)
3766 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3768 u16 msi_control; /* Temp variable */
3769 int ret, i, j, msix_indx = 1;
3771 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3773 if (nic->entries == NULL) {
3774 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3776 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3779 nic->mac_control.stats_info->sw_stat.mem_allocated
3780 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3781 memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3784 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3786 if (nic->s2io_entries == NULL) {
3787 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3789 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
/* Unwind the first allocation on failure of the second. */
3790 kfree(nic->entries);
3791 nic->mac_control.stats_info->sw_stat.mem_freed
3792 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3795 nic->mac_control.stats_info->sw_stat.mem_allocated
3796 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3797 memset(nic->s2io_entries, 0,
3798 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
/* Initialize every table slot as unused before steering assignment. */
3800 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3801 nic->entries[i].entry = i;
3802 nic->s2io_entries[i].entry = i;
3803 nic->s2io_entries[i].arg = NULL;
3804 nic->s2io_entries[i].in_use = 0;
/* Steer each Tx FIFO's interrupt to its own MSI-X vector. */
3807 tx_mat = readq(&bar0->tx_mat0_n[0]);
3808 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3809 tx_mat |= TX_MAT_SET(i, msix_indx);
3810 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3811 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3812 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3814 writeq(tx_mat, &bar0->tx_mat0_n[0]);
/* Non-bimodal: Rx rings get vectors via rx_mat. */
3816 if (!nic->config.bimodal) {
3817 rx_mat = readq(&bar0->rx_mat);
3818 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3819 rx_mat |= RX_MAT_SET(j, msix_indx);
3820 nic->s2io_entries[msix_indx].arg
3821 = &nic->mac_control.rings[j];
3822 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3823 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3825 writeq(rx_mat, &bar0->rx_mat);
/* Bimodal: Rx ring vectors are routed through tx_mat0_n[7] instead. */
3827 tx_mat = readq(&bar0->tx_mat0_n[7]);
3828 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
/*
 * NOTE(review): TX_MAT_SET(i, ...) uses loop-invariant 'i' (left over
 * from the FIFO loop above) while iterating over 'j' — looks like it
 * should be TX_MAT_SET(j, msix_indx); confirm against hardware spec
 * before changing.
 */
3829 tx_mat |= TX_MAT_SET(i, msix_indx);
3830 nic->s2io_entries[msix_indx].arg
3831 = &nic->mac_control.rings[j];
3832 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3833 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3835 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3838 nic->avail_msix_vectors = 0;
3839 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3840 /* We fail init if error or we get less vectors than min required */
/* pci_enable_msix returning >0 means "only this many vectors available":
 * retry with that count if it still covers fifos + rings + alarm. */
3841 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3842 nic->avail_msix_vectors = ret;
3843 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
/* Failure path: free both tables and zero the pointers. */
3846 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3847 kfree(nic->entries);
3848 nic->mac_control.stats_info->sw_stat.mem_freed
3849 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3850 kfree(nic->s2io_entries);
3851 nic->mac_control.stats_info->sw_stat.mem_freed
3852 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3853 nic->entries = NULL;
3854 nic->s2io_entries = NULL;
3855 nic->avail_msix_vectors = 0;
3858 if (!nic->avail_msix_vectors)
3859 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3862 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3863 * in the herc NIC. (Temp change, needs to be removed later)
/* 0x42 is the device's MSI control word in PCI config space. */
3865 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3866 msi_control |= 0x1; /* Enable MSI */
3867 pci_write_config_word(nic->pdev, 0x42, msi_control);
3872 /* Handle software interrupt used during MSI(X) test */
/*
 * Marks the interrupt as seen and wakes the waiter in s2io_test_msi();
 * together they verify the MSI(X) delivery path actually works.
 */
3873 static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3875 struct s2io_nic *sp = dev_id;
3877 sp->msi_detected = 1;
3878 wake_up(&sp->msi_wait);
3883 /* Test interrupt path by forcing a software IRQ */
/*
 * s2io_test_msi - verify MSI(X) delivery by arming the scheduled-interrupt
 * timer to fire vector 1 and waiting (up to HZ/10) for s2io_test_intr()
 * to set sp->msi_detected.  Restores scheduled_int_ctrl afterwards.
 * (Partial listing: error returns are elided.)
 */
3884 static int __devinit s2io_test_msi(struct s2io_nic *sp)
3886 struct pci_dev *pdev = sp->pdev;
3887 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3891 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3894 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3895 sp->dev->name, pci_name(pdev), pdev->irq);
3899 init_waitqueue_head (&sp->msi_wait);
3900 sp->msi_detected = 0;
/* Save the control register so it can be restored after the test. */
3902 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3903 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3904 val64 |= SCHED_INT_CTRL_TIMER_EN;
3905 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3906 writeq(val64, &bar0->scheduled_int_ctrl);
3908 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3910 if (!sp->msi_detected) {
3911 /* MSI(X) test failed, go back to INTx mode */
3912 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
3913 "using MSI(X) during test\n", sp->dev->name,
3919 free_irq(sp->entries[1].vector, sp);
3921 writeq(saved64, &bar0->scheduled_int_ctrl);
3925 /* ********************************************************* *
3926 * Functions defined below concern the OS part of the driver *
3927 * ********************************************************* */
3930 * s2io_open - open entry point of the driver
3931 * @dev : pointer to the device structure.
3933 * This function is the open entry point of the driver. It mainly calls a
3934 * function to allocate Rx buffers and inserts them into the buffer
3935 * descriptors and then enables the Rx part of the NIC.
3937 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_open - net_device open entry point.
 * Enables NAPI, sets up MSI-X (with an interrupt-delivery self-test and a
 * rollback to INTA on failure), then brings the hardware up via
 * s2io_card_up() and programs the MAC address.  The error path at the end
 * unwinds NAPI and the MSI-X tables.
 * (Partial listing: some branch/label lines are elided.)
 */
3941 static int s2io_open(struct net_device *dev)
3943 struct s2io_nic *sp = dev->priv;
3947 * Make sure you have link off by default every time
3948 * Nic is initialized
3950 netif_carrier_off(dev);
3951 sp->last_link_state = 0;
3953 napi_enable(&sp->napi);
3955 if (sp->intr_type == MSI_X) {
3956 int ret = s2io_enable_msi_x(sp);
/* Self-test the MSI-X path before committing to it. */
3961 ret = s2io_test_msi(sp);
3963 /* rollback MSI-X, will re-enable during add_isr() */
3965 sp->mac_control.stats_info->sw_stat.mem_freed +=
3966 (MAX_REQUESTED_MSI_X *
3967 sizeof(struct msix_entry));
3968 kfree(sp->s2io_entries);
3969 sp->mac_control.stats_info->sw_stat.mem_freed +=
3970 (MAX_REQUESTED_MSI_X *
3971 sizeof(struct s2io_msix_entry));
3973 sp->s2io_entries = NULL;
/* Undo the MSI enable done in s2io_enable_msi_x() (config word 0x42). */
3975 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3976 msi_control &= 0xFFFE; /* Disable MSI */
3977 pci_write_config_word(sp->pdev, 0x42, msi_control);
3979 pci_disable_msix(sp->pdev);
3985 "%s: MSI-X requested but failed to enable\n",
/* Fall back to legacy interrupts when MSI-X cannot be used. */
3987 sp->intr_type = INTA;
3991 /* NAPI doesn't work well with MSI(X) */
3992 if (sp->intr_type != INTA) {
3994 sp->config.napi = 0;
3997 /* Initialize H/W and enable interrupts */
3998 err = s2io_card_up(sp);
4000 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4002 goto hw_init_failed;
4005 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
4006 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4009 goto hw_init_failed;
4012 netif_start_queue(dev);
/* Error path: unwind NAPI and any MSI-X tables that were allocated. */
4016 napi_disable(&sp->napi);
4017 if (sp->intr_type == MSI_X) {
4020 sp->mac_control.stats_info->sw_stat.mem_freed
4021 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
4023 if (sp->s2io_entries) {
4024 kfree(sp->s2io_entries);
4025 sp->mac_control.stats_info->sw_stat.mem_freed
4026 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
4033 * s2io_close -close entry point of the driver
4034 * @dev : device pointer.
4036 * This is the stop entry point of the driver. It needs to undo exactly
4037 * whatever was done by the open entry point,thus it's usually referred to
4038 * as the close function. Among other things this function mainly stops the
4039 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4041 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_close - net_device stop entry point: stop the Tx queue and NAPI,
 * then (in lines elided from this listing) reset the card and free
 * Tx/Rx resources, undoing what s2io_open() did.
 */
4045 static int s2io_close(struct net_device *dev)
4047 struct s2io_nic *sp = dev->priv;
4049 netif_stop_queue(dev);
4050 napi_disable(&sp->napi);
4051 /* Reset card, kill tasklet and free Tx and Rx buffers. */
4058 * s2io_xmit - Tx entry point of the driver
4059 * @skb : the socket buffer containing the Tx data.
4060 * @dev : device pointer.
4062 * This function is the Tx entry point of the driver. S2IO NIC supports
4063 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4064 * NOTE: when device can't queue the pkt, just the trans_start variable will
4067 * 0 on success & 1 on failure.
/*
 * s2io_xmit - hard_start_xmit: build a TxD list for @skb and hand it to
 * the NIC.
 * Flow visible here: pick a FIFO from the skb's VLAN priority, grab the
 * next put descriptor, set LSO/checksum/UFO flags, DMA-map the linear
 * part and each page fragment, then write the list address and control
 * word into the FIFO doorbell registers.  Runs under sp->tx_lock.
 * The queue is stopped when the put pointer would catch the get pointer.
 * (Partial listing: several returns/branch lines are elided.)
 */
4070 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4072 struct s2io_nic *sp = dev->priv;
4073 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4076 struct TxFIFO_element __iomem *tx_fifo;
4077 unsigned long flags;
4079 int vlan_priority = 0;
4080 struct mac_info *mac_control;
4081 struct config_param *config;
4083 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4085 mac_control = &sp->mac_control;
4086 config = &sp->config;
4088 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
/* Drop zero-length skbs outright. */
4090 if (unlikely(skb->len <= 0)) {
4091 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4092 dev_kfree_skb_any(skb);
4096 spin_lock_irqsave(&sp->tx_lock, flags);
/* Refuse to transmit while the card is being reset. */
4097 if (atomic_read(&sp->card_state) == CARD_DOWN) {
4098 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4100 spin_unlock_irqrestore(&sp->tx_lock, flags);
4106 /* Get Fifo number to Transmit based on vlan priority */
4107 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4108 vlan_tag = vlan_tx_tag_get(skb);
/* Top 3 bits of the VLAN tag are the 802.1p priority. */
4109 vlan_priority = vlan_tag >> 13;
4110 queue = config->fifo_mapping[vlan_priority];
4113 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
4114 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
4115 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
4118 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
4119 /* Avoid "put" pointer going beyond "get" pointer */
/* Host_Control non-zero means the descriptor is still owned/in-flight. */
4120 if (txdp->Host_Control ||
4121 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4122 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4123 netif_stop_queue(dev);
4125 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Offload flags: TCP LSO, partial checksum, VLAN insertion. */
4129 offload_type = s2io_offload_type(skb);
4130 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4131 txdp->Control_1 |= TXD_TCP_LSO_EN;
4132 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4134 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4136 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4139 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4140 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4141 txdp->Control_2 |= config->tx_intr_type;
4143 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4144 txdp->Control_2 |= TXD_VLAN_ENABLE;
4145 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
/* Linear (non-paged) length of the skb. */
4148 frg_len = skb->len - skb->data_len;
/* UFO: TxD0 carries an 8-byte in-band header with the IPv6 frag id. */
4149 if (offload_type == SKB_GSO_UDP) {
4152 ufo_size = s2io_udp_mss(skb);
4154 txdp->Control_1 |= TXD_UFO_EN;
4155 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4156 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
/* Endian-dependent placement of the frag id (branch lines elided). */
4158 sp->ufo_in_band_v[put_off] =
4159 (u64)skb_shinfo(skb)->ip6_frag_id;
4161 sp->ufo_in_band_v[put_off] =
4162 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4164 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4165 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4167 sizeof(u64), PCI_DMA_TODEVICE);
4168 if((txdp->Buffer_Pointer == 0) ||
4169 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4170 goto pci_map_failed;
/* Map the linear part of the skb for DMA. */
4174 txdp->Buffer_Pointer = pci_map_single
4175 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4176 if((txdp->Buffer_Pointer == 0) ||
4177 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4178 goto pci_map_failed;
/* Stash the skb pointer for the completion handler to free. */
4180 txdp->Host_Control = (unsigned long) skb;
4181 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4182 if (offload_type == SKB_GSO_UDP)
4183 txdp->Control_1 |= TXD_UFO_EN;
4185 frg_cnt = skb_shinfo(skb)->nr_frags;
4186 /* For fragmented SKB. */
4187 for (i = 0; i < frg_cnt; i++) {
4188 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4189 /* A '0' length fragment will be ignored */
4193 txdp->Buffer_Pointer = (u64) pci_map_page
4194 (sp->pdev, frag->page, frag->page_offset,
4195 frag->size, PCI_DMA_TODEVICE);
4196 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4197 if (offload_type == SKB_GSO_UDP)
4198 txdp->Control_1 |= TXD_UFO_EN;
4200 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4202 if (offload_type == SKB_GSO_UDP)
4203 frg_cnt++; /* as Txd0 was used for inband header */
/* Ring the FIFO doorbell: list address, then the control word. */
4205 tx_fifo = mac_control->tx_FIFO_start[queue];
4206 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4207 writeq(val64, &tx_fifo->TxDL_Pointer);
4209 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4212 val64 |= TX_FIFO_SPECIAL_FUNC;
4214 writeq(val64, &tx_fifo->List_Control);
/* Advance and wrap the put pointer. */
4219 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4221 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4223 /* Avoid "put" pointer going beyond "get" pointer */
4224 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4225 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4227 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4229 netif_stop_queue(dev);
4231 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4232 dev->trans_start = jiffies;
4233 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* pci_map_failed: account the failure and drop the packet. */
4237 stats->pci_map_fail_cnt++;
4238 netif_stop_queue(dev);
4239 stats->mem_freed += skb->truesize;
4241 spin_unlock_irqrestore(&sp->tx_lock, flags);
/*
 * s2io_alarm_handle - timer callback: run the alarm interrupt handler
 * and re-arm itself to fire again in half a second.
 * @data: the s2io_nic pointer, passed through the timer as unsigned long.
 */
4246 s2io_alarm_handle(unsigned long data)
4248 struct s2io_nic *sp = (struct s2io_nic *)data;
4250 alarm_intr_handler(sp);
4251 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/*
 * s2io_chk_rx_buffers - replenish Rx buffers for ring @rng_n based on how
 * depleted the ring is: at PANIC level refill synchronously here (guarded
 * by the tasklet-in-use bit); at LOW level defer the refill to the
 * tasklet; otherwise do a best-effort fill_rx_buffers() call.
 */
4254 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4256 int rxb_size, level;
4259 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4260 level = rx_buffer_level(sp, rxb_size, rng_n);
/* PANIC: refill inline, but only if the tasklet isn't already doing it. */
4262 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4264 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4265 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4266 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4267 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4269 clear_bit(0, (&sp->tasklet_status));
4272 clear_bit(0, (&sp->tasklet_status));
4273 } else if (level == LOW)
4274 tasklet_schedule(&sp->task);
4276 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4277 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4278 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
/*
 * s2io_msix_ring_handle - per-Rx-ring MSI-X interrupt handler: process
 * the ring's received frames and top up its buffers.  isr_cnt brackets
 * the handler so other code can tell an ISR is in flight.
 */
4283 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4285 struct ring_info *ring = (struct ring_info *)dev_id;
4286 struct s2io_nic *sp = ring->nic;
4288 atomic_inc(&sp->isr_cnt);
4290 rx_intr_handler(ring);
4291 s2io_chk_rx_buffers(sp, ring->ring_no);
4293 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_fifo_handle - per-Tx-FIFO MSI-X interrupt handler: reap
 * completed transmit descriptors for this FIFO.
 */
4297 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4299 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4300 struct s2io_nic *sp = fifo->nic;
4302 atomic_inc(&sp->isr_cnt);
4303 tx_intr_handler(fifo);
4304 atomic_dec(&sp->isr_cnt);
/*
 * s2io_txpic_intr_handle - service TxPIC interrupts, chiefly the GPIO
 * link-state change interrupts.  On link-up it enables the adapter and
 * LED and masks further link-up interrupts (unmasking link-down); on
 * link-down it does the reverse, so only the relevant edge stays armed.
 */
4307 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4309 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4312 val64 = readq(&bar0->pic_int_status);
4313 if (val64 & PIC_INT_GPIO) {
4314 val64 = readq(&bar0->gpio_int_reg);
/* Both edges pending at once is inconsistent — clear and re-evaluate. */
4315 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4316 (val64 & GPIO_INT_REG_LINK_UP)) {
4318 * This is unstable state so clear both up/down
4319 * interrupt and adapter to re-evaluate the link state.
4321 val64 |= GPIO_INT_REG_LINK_DOWN;
4322 val64 |= GPIO_INT_REG_LINK_UP;
4323 writeq(val64, &bar0->gpio_int_reg);
4324 val64 = readq(&bar0->gpio_int_mask);
4325 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4326 GPIO_INT_MASK_LINK_DOWN);
4327 writeq(val64, &bar0->gpio_int_mask);
4329 else if (val64 & GPIO_INT_REG_LINK_UP) {
4330 val64 = readq(&bar0->adapter_status);
4331 /* Enable Adapter */
4332 val64 = readq(&bar0->adapter_control);
4333 val64 |= ADAPTER_CNTL_EN;
4334 writeq(val64, &bar0->adapter_control);
4335 val64 |= ADAPTER_LED_ON;
4336 writeq(val64, &bar0->adapter_control);
4337 if (!sp->device_enabled_once)
4338 sp->device_enabled_once = 1;
4340 s2io_link(sp, LINK_UP);
4342 * unmask link down interrupt and mask link-up
4345 val64 = readq(&bar0->gpio_int_mask);
4346 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4347 val64 |= GPIO_INT_MASK_LINK_UP;
4348 writeq(val64, &bar0->gpio_int_mask);
4350 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4351 val64 = readq(&bar0->adapter_status);
4352 s2io_link(sp, LINK_DOWN);
4353 /* Link is down so unmask link up interrupt */
4354 val64 = readq(&bar0->gpio_int_mask);
4355 val64 &= ~GPIO_INT_MASK_LINK_UP;
4356 val64 |= GPIO_INT_MASK_LINK_DOWN;
4357 writeq(val64, &bar0->gpio_int_mask);
/* Turn the activity LED off on link-down. */
4360 val64 = readq(&bar0->adapter_control);
4361 val64 = val64 &(~ADAPTER_LED_ON);
4362 writeq(val64, &bar0->adapter_control);
4365 val64 = readq(&bar0->gpio_int_mask);
4369 * s2io_isr - ISR handler of the device .
4370 * @irq: the irq of the device.
4371 * @dev_id: a void pointer to the dev structure of the NIC.
4372 * Description: This function is the ISR handler of the device. It
4373 * identifies the reason for the interrupt and calls the relevant
4374 * service routines. As a contingency measure, this ISR allocates the
4375 * recv buffers, if their numbers are below the panic value which is
4376 * presently set to 25% of the original number of rcv buffers allocated.
4378 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4379 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * s2io_isr - legacy (INTA) interrupt service routine.
 * Reads general_int_status to find the cause, dispatches Rx via NAPI when
 * enabled (otherwise calls rx_intr_handler per ring inline), reaps Tx
 * completions, services TxPIC events, and replenishes Rx buffers.
 * Writing all-ones to the R1 traffic-int registers acks the cause bits
 * without a read-back.
 * (Partial listing: some returns and the NAPI/else structure lines are
 * elided.)
 */
4381 static irqreturn_t s2io_isr(int irq, void *dev_id)
4383 struct net_device *dev = (struct net_device *) dev_id;
4384 struct s2io_nic *sp = dev->priv;
4385 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4388 struct mac_info *mac_control;
4389 struct config_param *config;
4391 /* Pretend we handled any irq's from a disconnected card */
4392 if (pci_channel_offline(sp->pdev))
4395 atomic_inc(&sp->isr_cnt);
4396 mac_control = &sp->mac_control;
4397 config = &sp->config;
4400 * Identify the cause for interrupt and call the appropriate
4401 * interrupt handler. Causes for the interrupt could be;
4405 * 4. Error in any functional blocks of the NIC.
4407 reason = readq(&bar0->general_int_status);
4410 /* The interrupt was not raised by us. */
4411 atomic_dec(&sp->isr_cnt);
/* All-ones status means the card has been removed/disconnected. */
4414 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4415 /* Disable device and get out */
4416 atomic_dec(&sp->isr_cnt);
/* NAPI path: schedule polling and mask further Rx interrupts. */
4421 if (reason & GEN_INTR_RXTRAFFIC) {
4422 if (likely (netif_rx_schedule_prep(dev, &sp->napi))) {
4423 __netif_rx_schedule(dev, &sp->napi);
4424 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4427 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4431 * Rx handler is called by default, without checking for the
4432 * cause of interrupt.
4433 * rx_traffic_int reg is an R1 register, writing all 1's
4434 * will ensure that the actual interrupt causing bit get's
4435 * cleared and hence a read can be avoided.
4437 if (reason & GEN_INTR_RXTRAFFIC)
4438 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4440 for (i = 0; i < config->rx_ring_num; i++) {
4441 rx_intr_handler(&mac_control->rings[i]);
4446 * tx_traffic_int reg is an R1 register, writing all 1's
4447 * will ensure that the actual interrupt causing bit get's
4448 * cleared and hence a read can be avoided.
4450 if (reason & GEN_INTR_TXTRAFFIC)
4451 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4453 for (i = 0; i < config->tx_fifo_num; i++)
4454 tx_intr_handler(&mac_control->fifos[i]);
4456 if (reason & GEN_INTR_TXPIC)
4457 s2io_txpic_intr_handle(sp);
4459 * If the Rx buffer count is below the panic threshold then
4460 * reallocate the buffers from the interrupt handler itself,
4461 * else schedule a tasklet to reallocate the buffers.
4464 for (i = 0; i < config->rx_ring_num; i++)
4465 s2io_chk_rx_buffers(sp, i);
/* Re-enable all interrupts; the readl flushes the posted write. */
4468 writeq(0, &bar0->general_int_mask);
4469 readl(&bar0->general_int_status);
4471 atomic_dec(&sp->isr_cnt);
/*
 * s2io_updt_stats - trigger a one-shot hardware statistics DMA into the
 * stats block and poll stat_cfg BIT(0) until the update completes (or
 * the bounded retry loop — elided in this listing — gives up).  Only
 * issued while the card is up.
 */
4478 static void s2io_updt_stats(struct s2io_nic *sp)
4480 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4484 if (atomic_read(&sp->card_state) == CARD_UP) {
4485 /* Apprx 30us on a 133 MHz bus */
4486 val64 = SET_UPDT_CLICKS(10) |
4487 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4488 writeq(val64, &bar0->stat_cfg);
4491 val64 = readq(&bar0->stat_cfg);
4492 if (!(val64 & BIT(0)))
4496 break; /* Updt failed */
4502 * s2io_get_stats - Updates the device statistics structure.
4503 * @dev : pointer to the device structure.
4505 * This function updates the device statistics structure in the s2io_nic
4506 * structure and returns a pointer to the same.
4508 * pointer to the updated net_device_stats structure.
/*
 * s2io_get_stats - net_device get_stats hook: force an immediate
 * hardware stats update, copy selected MAC counters from the DMA'd
 * stats block (converting from the device's little-endian layout) into
 * sp->stats, and return it.
 */
4511 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4513 struct s2io_nic *sp = dev->priv;
4514 struct mac_info *mac_control;
4515 struct config_param *config;
4518 mac_control = &sp->mac_control;
4519 config = &sp->config;
4521 /* Configure Stats for immediate updt */
4522 s2io_updt_stats(sp);
4524 sp->stats.tx_packets =
4525 le32_to_cpu(mac_control->stats_info->tmac_frms);
4526 sp->stats.tx_errors =
4527 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4528 sp->stats.rx_errors =
4529 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4530 sp->stats.multicast =
4531 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4532 sp->stats.rx_length_errors =
4533 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4535 return (&sp->stats);
4539 * s2io_set_multicast - entry point for multicast address enable/disable.
4540 * @dev : pointer to the device structure
4542 * This function is a driver entry point which gets called by the kernel
4543 * whenever multicast addresses must be enabled/disabled. This also gets
4544 * called to set/reset promiscuous mode. Depending on the device flag, we
4545 * determine, if multicast address must be enabled or if promiscuous mode
4546 * is to be disabled etc.
/*
 * s2io_set_multicast - net_device set_multicast_list hook.
 * Handles four concerns in sequence, each via the RMAC address-CMD
 * mailbox (write data0/data1, issue the command, wait for the strobe):
 *   1. enable/disable the catch-all multicast filter (IFF_ALLMULTI),
 *   2. enter/leave promiscuous mode (IFF_PROMISC) via mac_cfg, keyed
 *      writes through rmac_cfg_key, including VLAN-strip adjustment,
 *   3. clear the previously programmed per-address multicast list,
 *   4. program the new list from dev->mc_list.
 * (Partial listing: some braces/returns are elided.)
 */
4551 static void s2io_set_multicast(struct net_device *dev)
4554 struct dev_mc_list *mclist;
4555 struct s2io_nic *sp = dev->priv;
4556 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4557 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4559 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
/* --- 1. all-multicast enable --- */
4562 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4563 /* Enable all Multicast addresses */
4564 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4565 &bar0->rmac_addr_data0_mem);
4566 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4567 &bar0->rmac_addr_data1_mem);
4568 val64 = RMAC_ADDR_CMD_MEM_WE |
4569 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4570 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4571 writeq(val64, &bar0->rmac_addr_cmd_mem);
4572 /* Wait till command completes */
4573 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4574 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4578 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4579 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4580 /* Disable all Multicast addresses */
4581 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4582 &bar0->rmac_addr_data0_mem);
4583 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4584 &bar0->rmac_addr_data1_mem);
4585 val64 = RMAC_ADDR_CMD_MEM_WE |
4586 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4587 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4588 writeq(val64, &bar0->rmac_addr_cmd_mem);
4589 /* Wait till command completes */
4590 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4591 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4595 sp->all_multi_pos = 0;
/* --- 2. promiscuous mode on/off --- */
4598 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4599 /* Put the NIC into promiscuous mode */
4600 add = &bar0->mac_cfg;
4601 val64 = readq(&bar0->mac_cfg);
4602 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: unlock before each 32-bit half-write. */
4604 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4605 writel((u32) val64, add);
4606 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4607 writel((u32) (val64 >> 32), (add + 4));
4609 if (vlan_tag_strip != 1) {
4610 val64 = readq(&bar0->rx_pa_cfg);
4611 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4612 writeq(val64, &bar0->rx_pa_cfg);
4613 vlan_strip_flag = 0;
4616 val64 = readq(&bar0->mac_cfg);
4617 sp->promisc_flg = 1;
4618 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4620 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4621 /* Remove the NIC from promiscuous mode */
4622 add = &bar0->mac_cfg;
4623 val64 = readq(&bar0->mac_cfg);
4624 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4626 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4627 writel((u32) val64, add);
4628 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4629 writel((u32) (val64 >> 32), (add + 4));
4631 if (vlan_tag_strip != 0) {
4632 val64 = readq(&bar0->rx_pa_cfg);
4633 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4634 writeq(val64, &bar0->rx_pa_cfg);
4635 vlan_strip_flag = 1;
4638 val64 = readq(&bar0->mac_cfg);
4639 sp->promisc_flg = 0;
4640 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4644 /* Update individual M_CAST address list */
4645 if ((!sp->m_cast_flg) && dev->mc_count) {
/* Bail out if the request exceeds the hardware filter capacity. */
4647 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4648 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4650 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4651 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4655 prev_cnt = sp->mc_addr_count;
4656 sp->mc_addr_count = dev->mc_count;
4658 /* Clear out the previous list of Mc in the H/W. */
4659 for (i = 0; i < prev_cnt; i++) {
4660 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4661 &bar0->rmac_addr_data0_mem);
4662 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4663 &bar0->rmac_addr_data1_mem);
4664 val64 = RMAC_ADDR_CMD_MEM_WE |
4665 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4666 RMAC_ADDR_CMD_MEM_OFFSET
4667 (MAC_MC_ADDR_START_OFFSET + i);
4668 writeq(val64, &bar0->rmac_addr_cmd_mem);
4670 /* Wait for command completes */
4671 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4672 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4674 DBG_PRINT(ERR_DBG, "%s: Adding ",
4676 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4681 /* Create the new Rx filter list and update the same in H/W. */
4682 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4683 i++, mclist = mclist->next) {
4684 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6-byte MAC into a u64 (shift lines elided in this listing). */
4687 for (j = 0; j < ETH_ALEN; j++) {
4688 mac_addr |= mclist->dmi_addr[j];
4692 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4693 &bar0->rmac_addr_data0_mem);
4694 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4695 &bar0->rmac_addr_data1_mem);
4696 val64 = RMAC_ADDR_CMD_MEM_WE |
4697 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4698 RMAC_ADDR_CMD_MEM_OFFSET
4699 (i + MAC_MC_ADDR_START_OFFSET);
4700 writeq(val64, &bar0->rmac_addr_cmd_mem);
4702 /* Wait for command completes */
4703 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4704 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4706 DBG_PRINT(ERR_DBG, "%s: Adding ",
4708 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4716 * s2io_set_mac_addr - Programs the Xframe mac address
4717 * @dev : pointer to the device structure.
4718 * @addr: a uchar pointer to the new mac address which is to be set.
4719 * Description : This procedure will program the Xframe to receive
4720 * frames with new Mac Address
4721 * Return value: SUCCESS on success and an appropriate (-)ve integer
4722 * as defined in errno.h file on failure.
/*
 * s2io_set_mac_addr - program @addr as the unicast filter at RMAC
 * offset 0.
 * Packs the 6-byte address into a u64, updates sp->def_mac_addr[0] if it
 * changed, then writes it through the RMAC address-CMD mailbox and waits
 * for completion.
 * Returns SUCCESS, or FAILURE if the command never completes.
 * NOTE(review): memset(..., 0, sizeof(ETH_ALEN)) at 4750 clears only
 * sizeof(int) bytes, not ETH_ALEN bytes — harmless here since all six
 * bytes are overwritten just below, but the sizeof looks unintended.
 */
4725 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4727 struct s2io_nic *sp = dev->priv;
4728 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4729 register u64 val64, mac_addr = 0;
4731 u64 old_mac_addr = 0;
4734 * Set the new MAC address as the new unicast filter and reflect this
4735 * change on the device address registered with the OS. It will be
/* Pack new and current addresses into u64s (shift lines elided). */
4738 for (i = 0; i < ETH_ALEN; i++) {
4740 mac_addr |= addr[i];
4742 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4748 /* Update the internal structure with this new mac address */
4749 if(mac_addr != old_mac_addr) {
4750 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4751 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4752 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4753 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4754 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4755 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4756 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
/* Program offset 0 (the unicast slot) via the address-CMD mailbox. */
4759 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4760 &bar0->rmac_addr_data0_mem);
4763 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4764 RMAC_ADDR_CMD_MEM_OFFSET(0);
4765 writeq(val64, &bar0->rmac_addr_cmd_mem);
4766 /* Wait till command completes */
4767 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4768 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4769 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
/**
 * s2io_ethtool_sset - Sets different link parameters.
 * @dev : pointer to the net device structure (holds the s2io_nic private
 * data in dev->priv).
 * @info: pointer to the structure with parameters given by ethtool to set
 * link information.
 * Description:
 * The function sets different link parameters provided by the user onto
 * the NIC.  The hardware only supports 10G full duplex without
 * autonegotiation, so anything else is rejected.
 */

static int s2io_ethtool_sset(struct net_device *dev,
			     struct ethtool_cmd *info)
	struct s2io_nic *sp = dev->priv;
	/* Only fixed 10000/full with autoneg off is programmable */
	if ((info->autoneg == AUTONEG_ENABLE) ||
	    (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
	/* Restart the interface so the (unchanged) settings take effect */
	s2io_close(sp->dev);
/**
 * s2io_ethtool_gset - Return link specific information.
 * @dev : pointer to the net device structure (s2io_nic private data in
 * dev->priv).
 * @info : pointer to the structure with parameters given by ethtool
 * to return link information.
 * Description:
 * Returns link specific information like speed, duplex etc.. to ethtool.
 * Return value :
 * return 0 on success.
 */

static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
	struct s2io_nic *sp = dev->priv;
	/* Xframe is a 10GbE fibre-only device; report exactly that */
	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->port = PORT_FIBRE;
	/* info->transceiver?? TODO */

	/* Speed/duplex are only meaningful while the carrier is up */
	if (netif_carrier_ok(sp->dev)) {
		info->speed = 10000;
		info->duplex = DUPLEX_FULL;
	/* Autonegotiation is not supported by the hardware */
	info->autoneg = AUTONEG_DISABLE;
/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 * @dev : pointer to the net device structure (s2io_nic private data in
 * dev->priv).
 * @info : pointer to the structure with parameters given by ethtool to
 * return driver information.
 * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
 */

static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
	struct s2io_nic *sp = dev->priv;

	/*
	 * NOTE(review): strncpy() does not NUL-terminate when the source
	 * fills the destination -- confirm the driver name/version strings
	 * are strictly shorter than the ethtool_drvinfo fields.
	 */
	strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strncpy(info->version, s2io_driver_version, sizeof(info->version));
	/* No separately versioned firmware: report an empty fw string */
	strncpy(info->fw_version, "", sizeof(info->fw_version));
	strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
	info->regdump_len = XENA_REG_SPACE;
	info->eedump_len = XENA_EEPROM_SPACE;
	info->testinfo_len = S2IO_TEST_LEN;

	/* Xframe I and II expose different sized statistics blocks */
	if (sp->device_type == XFRAME_I_DEVICE)
		info->n_stats = XFRAME_I_STAT_LEN;
		info->n_stats = XFRAME_II_STAT_LEN;
4867 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
4868 * @sp: private member of the device structure, which is a pointer to the
4869 * s2io_nic structure.
4870 * @regs : pointer to the structure with parameters given by ethtool for
4871 * dumping the registers.
4872 * @reg_space: The input argumnet into which all the registers are dumped.
4874 * Dumps the entire register space of xFrame NIC into the user given
4880 static void s2io_ethtool_gregs(struct net_device *dev,
4881 struct ethtool_regs *regs, void *space)
4885 u8 *reg_space = (u8 *) space;
4886 struct s2io_nic *sp = dev->priv;
4888 regs->len = XENA_REG_SPACE;
4889 regs->version = sp->pdev->subsystem_device;
4891 for (i = 0; i < regs->len; i += 8) {
4892 reg = readq(sp->bar0 + i);
4893 memcpy((reg_space + i), ®, 8);
/**
 * s2io_phy_id - timer function that alternates adapter LED.
 * @data : address of the private member of the device structure, which
 * is a pointer to the s2io_nic structure, provided as an u32.
 * Description: This is actually the timer function that alternates the
 * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
 * once every second.
 */
static void s2io_phy_id(unsigned long data)
	struct s2io_nic *sp = (struct s2io_nic *) data;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	subid = sp->pdev->subsystem_device;
	/* Xframe II, and Xframe I boards with subsystem id >= 0x07, drive
	 * the identification LED through GPIO 0. */
	if ((sp->device_type == XFRAME_II_DEVICE) ||
	    ((subid & 0xFF) >= 0x07)) {
		val64 = readq(&bar0->gpio_control);
		val64 ^= GPIO_CTRL_GPIO_0;	/* toggle LED state */
		writeq(val64, &bar0->gpio_control);
		/* Older boards toggle the LED bit in adapter_control */
		val64 = readq(&bar0->adapter_control);
		val64 ^= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);

	/* Re-arm so the LED toggles again in half a second */
	mod_timer(&sp->id_timer, jiffies + HZ / 2);
/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @dev : pointer to the net device structure (s2io_nic private data in
 * dev->priv).
 * @data : number of seconds the LED should blink, given by ethtool.
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 * Return value:
 * int , returns 0 on success
 */

static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	subid = sp->pdev->subsystem_device;
	/* Remember the current GPIO state so it can be restored afterwards */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Old Xframe I boards can only blink while the adapter is enabled */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
	    ((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			"Adapter Link down, cannot blink LED\n");
	/* Lazily initialize the blink timer on first use */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	/* Start blinking immediately; s2io_phy_id re-arms itself */
	mod_timer(&sp->id_timer, jiffies);
	/* Sleep for the user-requested duration (or a default maximum) */
	msleep_interruptible(data * HZ);
	msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the saved GPIO state on boards with quirky indicators */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
/**
 * s2io_ethtool_gringparam - returns the Tx/Rx ring sizes to ethtool.
 * @dev : pointer to the net device structure (s2io_nic private data in
 * dev->priv).
 * @ering : ethtool ring parameter structure to be filled in.
 * Description: Reports the maximum and currently configured descriptor
 * counts for the transmit and receive rings.  The Rx maxima depend on
 * the receive descriptor mode (1-buffer vs 3-buffer).
 */
static void s2io_ethtool_gringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
	struct s2io_nic *sp = dev->priv;
	int i,tx_desc_count=0,rx_desc_count=0;

	/* Maximum Rx descriptors depends on the buffer mode in use */
	if (sp->rxd_mode == RXD_MODE_1)
		ering->rx_max_pending = MAX_RX_DESC_1;
	else if (sp->rxd_mode == RXD_MODE_3B)
		ering->rx_max_pending = MAX_RX_DESC_2;

	ering->tx_max_pending = MAX_TX_DESC;
	/* Current Tx count = sum of all configured FIFO lengths */
	for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
		tx_desc_count += sp->config.tx_cfg[i].fifo_len;

	DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
	ering->tx_pending = tx_desc_count;

	/* Current Rx count = sum of descriptors over all rings */
	for (i = 0 ; i < sp->config.rx_ring_num ; i++)
		rx_desc_count += sp->config.rx_cfg[i].num_rxd;

	ering->rx_pending = rx_desc_count;

	/* No mini ring; jumbo figures mirror the regular Rx ring */
	ering->rx_mini_max_pending = 0;
	ering->rx_mini_pending = 0;
	if(sp->rxd_mode == RXD_MODE_1)
		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
	else if (sp->rxd_mode == RXD_MODE_3B)
		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
	ering->rx_jumbo_pending = rx_desc_count;
/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
 * @dev : pointer to the net device structure (s2io_nic private data in
 * dev->priv).
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * Returns the Pause frame generation and reception capability of the NIC,
 * read directly from the RMAC pause configuration register.
 */
static void s2io_ethtool_getpause_data(struct net_device *dev,
				       struct ethtool_pauseparam *ep)
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 & RMAC_PAUSE_GEN_ENABLE)
		ep->tx_pause = TRUE;
	if (val64 & RMAC_PAUSE_RX_ENABLE)
		ep->rx_pause = TRUE;
	/* Pause autonegotiation is not supported */
	ep->autoneg = FALSE;
/**
 * s2io_ethtool_setpause_data - set/reset pause frame generation.
 * @dev : pointer to the net device structure (s2io_nic private data in
 * dev->priv).
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC, via a read-modify-write of the RMAC pause
 * configuration register.
 * Return value:
 * int, returns 0 on Success
 */

static int s2io_ethtool_setpause_data(struct net_device *dev,
				      struct ethtool_pauseparam *ep)
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Read-modify-write: flip only the generation/reception bits */
	val64 = readq(&bar0->rmac_pause_cfg);
		val64 |= RMAC_PAUSE_GEN_ENABLE;
		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
		val64 |= RMAC_PAUSE_RX_ENABLE;
		val64 &= ~RMAC_PAUSE_RX_ENABLE;
	writeq(val64, &bar0->rmac_pause_cfg);
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be read
 * @data : Its an output parameter where the data read at the given
 * offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.  Xframe I uses the I2C interface, Xframe II the SPI one.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 * I2C bus.
 * Return value:
 * -1 on failure and 0 on success.
 */

#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Kick off a 4-byte I2C read at the requested offset */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll (bounded) for the I2C transaction to finish */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Program the SPI read command, then raise the request bit */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll (bounded) for NACK or completion */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
/**
 * write_eeprom - actually writes the relevant part of the data value.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : The data that is to be written
 * @cnt : Number of bytes of the data that are actually to be written into
 * the Eeprom. (max of 3)
 * Description:
 * Actually writes the relevant part of the data value into the Eeprom
 * through the I2C bus (Xframe I) or the SPI interface (Xframe II).
 * Return value:
 * 0 on success, -1 on failure.
 */

static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
	int exit_cnt = 0, ret = -1;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Start an I2C write of the low 32 bits of data */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll (bounded) for completion; NACK means failure */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* SPI encodes an 8-byte write as a byte count of 0 */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		/* Program the SPI write command, then raise the request bit */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll (bounded) for NACK or completion */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
			} else if (val64 & SPI_CONTROL_DONE) {
/**
 * s2io_vpd_read - reads the Vital Product Data of the adapter.
 * @nic : pointer to the s2io_nic structure.
 * Description: Reads the 256-byte VPD area through PCI config space and
 * extracts the product name and serial number into the nic structure.
 * Falls back to static defaults when the VPD read fails.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
	int i=0, cnt, fail = 0;
	int vpd_addr = 0x80;	/* offset of the VPD capability in config space */

	/* Default product name per device type, overwritten from VPD below */
	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
	strcpy(nic->product_name, "Xframe I 10GbE network adapter");

	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
	nic->mac_control.stats_info->sw_stat.mem_allocated += 256;

	/* Read the VPD area one dword at a time via the VPD capability */
	for (i = 0; i < 256; i +=4 ) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		/* Poll (bounded) for the hardware to flag data as ready */
		for (cnt = 0; cnt <5; cnt++) {
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
		pci_read_config_dword(nic->pdev, (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);

	/* read serial number of adapter */
	for (cnt = 0; cnt < 256; cnt++) {
		/* Look for an "SN" tag followed by a sane length byte */
		if ((vpd_data[cnt] == 'S') &&
		    (vpd_data[cnt+1] == 'N') &&
		    (vpd_data[cnt+2] < VPD_STRING_LEN)) {
			memset(nic->serial_num, 0, VPD_STRING_LEN);
			memcpy(nic->serial_num, &vpd_data[cnt + 3],

	/* Product name string: length byte at vpd_data[1], text at [3] */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		memset(nic->product_name, 0, vpd_data[1]);
		memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
	nic->mac_control.stats_info->sw_stat.mem_freed += 256;
/**
 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 * @dev : pointer to the net device structure (s2io_nic private data in
 * dev->priv).
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information.
 * @data_buf : buffer into which the EEPROM contents are returned.
 * Description: Reads the values stored in the Eeprom at given offset
 * for a given length. Stores these values into the input argument data
 * buffer 'data_buf' and returns these to the caller (ethtool.)
 */

static int s2io_ethtool_geeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 * data_buf)
	struct s2io_nic *sp = dev->priv;

	/* Magic identifies the device the dump came from */
	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);

	/* Clamp the request to the visible EEPROM space */
	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;

	for (i = 0; i < eeprom->len; i += 4) {
		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
		/* 'valid' is derived from 'data' -- presumably byte-swapped
		 * for the user; the conversion line is not visible here,
		 * so confirm against the full source. */
		memcpy((data_buf + i), &valid, 4);
/**
 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
 * @dev : pointer to the net device structure (s2io_nic private data in
 * dev->priv).
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information.
 * @data_buf : user defined value to be written into Eeprom.
 * Description:
 * Tries to write the user provided value in the Eeprom, at the offset
 * given by the user.  The magic value must match this device.
 * Return value:
 * 0 on success, -EFAULT on failure.
 */

static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = dev->priv;

	/* Reject writes whose magic doesn't match this vendor/device pair */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",

		/* Write one byte at a time, positioned in the high byte */
		data = (u32) data_buf[cnt] & 0x000000FF;
			valid = (u32) (data << 24);

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
				  "write into the specified offset\n");
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains;
 * see that registers in all the three regions are accessible.  Known
 * constant registers are compared against their documented reset values,
 * then a scratch register (xmsi_data) is exercised with two patterns.
 */

static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;

	/* Read test 1: swapper feedback register has a fixed pattern */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");

	/* Read test 2: pause config reset value */
	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");

	/* Read test 3: rx queue config reset value differs per device */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");

	/* Read test 4: XGXS elastic FIFO config reset value */
	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");

	/* Write test 1: write/readback pattern through xmsi_data */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");

	/* Write test 2: inverted pattern */
	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
/**
 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data:variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
 * register: positive write/read-back tests at 0x4F0 and 0x7F0, and
 * negative (expected-to-fail) writes to protected offsets on Xframe I.
 */

static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))

	/* Save current values at offsets 0x4F0 and 0x7F0 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
	if (!read_eeprom(sp, 0x7F0, &org_7F0))

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
	if (read_eeprom(sp, 0x4F0, &ret_data))

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
	if (read_eeprom(sp, 0x7F0, &ret_data))

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Negative tests: these offsets must reject writes */
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))

	/* Restore values at offsets 0x4F0 and 0x7F0 */
		write_eeprom(sp, 0x4F0, org_4F0, 3);
		write_eeprom(sp, 0x7F0, org_7F0, 3);
/**
 * s2io_bist_test - invokes the MemBist test of the card .
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data:variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * This invokes the MemBist test of the card. We give around
 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
 * Return value:
 * 0 on success and -1 on failure.
 */

static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
	int cnt = 0, ret = -1;

	/* Start the PCI built-in self test */
	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
	bist |= PCI_BIST_START;
	/*
	 * NOTE(review): bist is read as a byte but written back with
	 * pci_write_config_word() -- a byte write of the single PCI_BIST
	 * register looks intended; verify against the PCI spec/headers.
	 */
	pci_write_config_word(sp->pdev, PCI_BIST, bist);

	/* Poll until the START bit clears; the code is the result */
	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
	if (!(bist & PCI_BIST_START)) {
		*data = (bist & PCI_BIST_CODE_MASK);
/**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data: variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * The function verifies the link state of the NIC and updates the input
 * argument 'data' appropriately.
 */

static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* LINK_IS_UP decodes the adapter status register */
	val64 = readq(&bar0->adapter_status);
	if(!(LINK_IS_UP(val64)))
/**
 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 * This is one of the offline tests that tests the read and write
 * access to the RldRam chip on the NIC.  Two iterations write a pattern
 * and its complement, read them back and check the PASS flag.
 */

static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so test patterns aren't corrected behind our back */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the RLDRAM controller into test mode */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Iteration 0 writes the base patterns, iteration 1 their inverses
	 * (upper 48 bits flipped). */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Test address range/stride programmed in one shot */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write pass and poll for DONE */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)

		/* Kick off the read-back pass and poll for DONE */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)

		/* PASS flag cleared means this iteration failed */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
 * @dev : pointer to the net device structure (s2io_nic private data in
 * dev->priv).
 * @ethtest : pointer to a ethtool command specific structure that will be
 * returned to the user.
 * @data : variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 * This function conducts 6 tests ( 4 offline and 2 online) to determine
 * the health of the card.  Offline tests require the interface to be
 * closed and reopened around them.
 */

static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
	struct s2io_nic *sp = dev->priv;
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		/* Interface must be down for the intrusive tests */
		s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

			 "%s: is not up, cannot run test\n",

		/* Online test: link check can run while the device is up */
		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;
5753 static void s2io_get_ethtool_stats(struct net_device *dev,
5754 struct ethtool_stats *estats,
5758 struct s2io_nic *sp = dev->priv;
5759 struct stat_block *stat_info = sp->mac_control.stats_info;
5761 s2io_updt_stats(sp);
5763 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5764 le32_to_cpu(stat_info->tmac_frms);
5766 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5767 le32_to_cpu(stat_info->tmac_data_octets);
5768 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5770 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5771 le32_to_cpu(stat_info->tmac_mcst_frms);
5773 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5774 le32_to_cpu(stat_info->tmac_bcst_frms);
5775 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5777 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5778 le32_to_cpu(stat_info->tmac_ttl_octets);
5780 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5781 le32_to_cpu(stat_info->tmac_ucst_frms);
5783 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5784 le32_to_cpu(stat_info->tmac_nucst_frms);
5786 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5787 le32_to_cpu(stat_info->tmac_any_err_frms);
5788 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5789 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5791 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5792 le32_to_cpu(stat_info->tmac_vld_ip);
5794 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5795 le32_to_cpu(stat_info->tmac_drop_ip);
5797 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5798 le32_to_cpu(stat_info->tmac_icmp);
5800 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5801 le32_to_cpu(stat_info->tmac_rst_tcp);
5802 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5803 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5804 le32_to_cpu(stat_info->tmac_udp);
5806 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5807 le32_to_cpu(stat_info->rmac_vld_frms);
5809 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5810 le32_to_cpu(stat_info->rmac_data_octets);
5811 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5812 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5814 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5815 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5817 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5818 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5819 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5820 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5821 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5822 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5823 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5825 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5826 le32_to_cpu(stat_info->rmac_ttl_octets);
5828 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5829 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5831 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5832 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5834 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5835 le32_to_cpu(stat_info->rmac_discarded_frms);
5837 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5838 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5839 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5840 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5842 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5843 le32_to_cpu(stat_info->rmac_usized_frms);
5845 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5846 le32_to_cpu(stat_info->rmac_osized_frms);
5848 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5849 le32_to_cpu(stat_info->rmac_frag_frms);
5851 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5852 le32_to_cpu(stat_info->rmac_jabber_frms);
5853 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5854 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5855 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5856 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5857 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5858 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5860 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5861 le32_to_cpu(stat_info->rmac_ip);
5862 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5863 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5865 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5866 le32_to_cpu(stat_info->rmac_drop_ip);
5868 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5869 le32_to_cpu(stat_info->rmac_icmp);
5870 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5872 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5873 le32_to_cpu(stat_info->rmac_udp);
5875 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5876 le32_to_cpu(stat_info->rmac_err_drp_udp);
5877 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5878 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5879 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5880 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5881 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5882 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5883 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5884 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5885 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5886 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5887 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5888 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5889 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5890 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5891 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5892 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5893 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5895 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5896 le32_to_cpu(stat_info->rmac_pause_cnt);
5897 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5898 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5900 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5901 le32_to_cpu(stat_info->rmac_accepted_ip);
5902 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5903 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5904 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5905 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5906 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5907 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5908 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5909 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5910 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5911 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5912 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5913 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5914 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5915 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5916 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5917 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5918 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5919 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5920 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5922 /* Enhanced statistics exist only for Hercules */
5923 if(sp->device_type == XFRAME_II_DEVICE) {
5925 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5927 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5929 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5930 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5931 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5932 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5933 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5934 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5935 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5936 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5937 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5938 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5939 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5940 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5941 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5942 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5946 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5947 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5948 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5949 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5950 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5951 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5952 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5953 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5954 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5955 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5956 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5957 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5958 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5959 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5960 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5961 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5962 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5963 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5964 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5965 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5966 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5967 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5968 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5969 if (stat_info->sw_stat.num_aggregations) {
5970 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5973 * Since 64-bit divide does not work on all platforms,
5974 * do repeated subtraction.
5976 while (tmp >= stat_info->sw_stat.num_aggregations) {
5977 tmp -= stat_info->sw_stat.num_aggregations;
5980 tmp_stats[i++] = count;
5984 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
5985 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
5986 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
5987 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
5988 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
5989 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
5990 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
5991 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
5992 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
5994 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
5995 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
5996 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
5997 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
5998 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6000 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6001 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6002 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6003 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6004 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6005 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6006 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6007 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6008 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6011 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6013 return (XENA_REG_SPACE);
6017 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6019 struct s2io_nic *sp = dev->priv;
6021 return (sp->rx_csum);
6024 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6026 struct s2io_nic *sp = dev->priv;
6036 static int s2io_get_eeprom_len(struct net_device *dev)
6038 return (XENA_EEPROM_SPACE);
6041 static int s2io_ethtool_self_test_count(struct net_device *dev)
6043 return (S2IO_TEST_LEN);
6046 static void s2io_ethtool_get_strings(struct net_device *dev,
6047 u32 stringset, u8 * data)
6050 struct s2io_nic *sp = dev->priv;
6052 switch (stringset) {
6054 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6057 stat_size = sizeof(ethtool_xena_stats_keys);
6058 memcpy(data, ðtool_xena_stats_keys,stat_size);
6059 if(sp->device_type == XFRAME_II_DEVICE) {
6060 memcpy(data + stat_size,
6061 ðtool_enhanced_stats_keys,
6062 sizeof(ethtool_enhanced_stats_keys));
6063 stat_size += sizeof(ethtool_enhanced_stats_keys);
6066 memcpy(data + stat_size, ðtool_driver_stats_keys,
6067 sizeof(ethtool_driver_stats_keys));
6070 static int s2io_ethtool_get_stats_count(struct net_device *dev)
6072 struct s2io_nic *sp = dev->priv;
6074 switch(sp->device_type) {
6075 case XFRAME_I_DEVICE:
6076 stat_count = XFRAME_I_STAT_LEN;
6079 case XFRAME_II_DEVICE:
6080 stat_count = XFRAME_II_STAT_LEN;
6087 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6090 dev->features |= NETIF_F_IP_CSUM;
6092 dev->features &= ~NETIF_F_IP_CSUM;
6097 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6099 return (dev->features & NETIF_F_TSO) != 0;
6101 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6104 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6106 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6111 static const struct ethtool_ops netdev_ethtool_ops = {
6112 .get_settings = s2io_ethtool_gset,
6113 .set_settings = s2io_ethtool_sset,
6114 .get_drvinfo = s2io_ethtool_gdrvinfo,
6115 .get_regs_len = s2io_ethtool_get_regs_len,
6116 .get_regs = s2io_ethtool_gregs,
6117 .get_link = ethtool_op_get_link,
6118 .get_eeprom_len = s2io_get_eeprom_len,
6119 .get_eeprom = s2io_ethtool_geeprom,
6120 .set_eeprom = s2io_ethtool_seeprom,
6121 .get_ringparam = s2io_ethtool_gringparam,
6122 .get_pauseparam = s2io_ethtool_getpause_data,
6123 .set_pauseparam = s2io_ethtool_setpause_data,
6124 .get_rx_csum = s2io_ethtool_get_rx_csum,
6125 .set_rx_csum = s2io_ethtool_set_rx_csum,
6126 .get_tx_csum = ethtool_op_get_tx_csum,
6127 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6128 .get_sg = ethtool_op_get_sg,
6129 .set_sg = ethtool_op_set_sg,
6130 .get_tso = s2io_ethtool_op_get_tso,
6131 .set_tso = s2io_ethtool_op_set_tso,
6132 .get_ufo = ethtool_op_get_ufo,
6133 .set_ufo = ethtool_op_set_ufo,
6134 .self_test_count = s2io_ethtool_self_test_count,
6135 .self_test = s2io_ethtool_test,
6136 .get_strings = s2io_ethtool_get_strings,
6137 .phys_id = s2io_ethtool_idnic,
6138 .get_stats_count = s2io_ethtool_get_stats_count,
6139 .get_ethtool_stats = s2io_get_ethtool_stats
6143 * s2io_ioctl - Entry point for the Ioctl
6144 * @dev : Device pointer.
6145 * @ifr : An IOCTL specefic structure, that can contain a pointer to
6146 * a proprietary structure used to pass information to the driver.
6147 * @cmd : This is used to distinguish between the different commands that
6148 * can be passed to the IOCTL functions.
6150 * Currently there are no special functionality supported in IOCTL, hence
6151 * function always return EOPNOTSUPPORTED
6154 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/*
 * NOTE(review): the embedded source-line numbers in this extract are not
 * contiguous (e.g. 6176-6180, 6182, 6193 absent), so statements are
 * missing from this block.  Code left byte-identical; comments only.
 */
6160 * s2io_change_mtu - entry point to change MTU size for the device.
6161 * @dev : device pointer.
6162 * @new_mtu : the new MTU size for the device.
6163 * Description: A driver entry point to change MTU size for the device.
6164 * Before changing the MTU the device must be stopped.
6166 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6170 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6172 struct s2io_nic *sp = dev->priv;
/* Reject MTUs outside the supported [MIN_MTU, S2IO_JUMBO_SIZE] range. */
6174 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6175 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* Interface up: cycle the card (queue stopped meanwhile) so the new MTU
 * takes effect end to end. */
6181 if (netif_running(dev)) {
6183 netif_stop_queue(dev);
6184 if (s2io_card_up(sp)) {
6185 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6188 if (netif_queue_stopped(dev))
6189 netif_wake_queue(dev);
6190 } else { /* Device is down */
6191 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6192 u64 val64 = new_mtu;
/* Interface down: just program the RMAC max payload length register. */
6194 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous).  Code left byte-identical; comments only.
 */
6201 * s2io_tasklet - Bottom half of the ISR.
6202 * @dev_adr : address of the device structure in dma_addr_t format.
6204 * This is the tasklet or the bottom half of the ISR. This is
6205 * an extension of the ISR which is scheduled by the scheduler to be run
6206 * when the load on the CPU is low. All low priority tasks of the ISR can
6207 * be pushed into the tasklet. For now the tasklet is used only to
6208 * replenish the Rx buffers in the Rx buffer descriptors.
6213 static void s2io_tasklet(unsigned long dev_addr)
6215 struct net_device *dev = (struct net_device *) dev_addr;
6216 struct s2io_nic *sp = dev->priv;
6218 struct mac_info *mac_control;
6219 struct config_param *config;
6221 mac_control = &sp->mac_control;
6222 config = &sp->config;
/* Refill every Rx ring unless another instance holds tasklet_status. */
6224 if (!TASKLET_IN_USE) {
6225 for (i = 0; i < config->rx_ring_num; i++) {
6226 ret = fill_rx_buffers(sp, i);
6227 if (ret == -ENOMEM) {
6228 DBG_PRINT(INFO_DBG, "%s: Out of ",
6230 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6232 } else if (ret == -EFILL) {
6234 "%s: Rx Ring %d is full\n",
/* Release the in-use marker taken via TASKLET_IN_USE above. */
6239 clear_bit(0, (&sp->tasklet_status));
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous); the register sequencing below is
 * order-sensitive, so code is left byte-identical and only comments added.
 */
6244 * s2io_set_link - Set the LInk status
6245 * @data: long pointer to device private structue
6246 * Description: Sets the link status for the adapter
6249 static void s2io_set_link(struct work_struct *work)
6251 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6252 struct net_device *dev = nic->dev;
6253 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6259 if (!netif_running(dev))
/* link_state doubles as a "reset in progress" latch; bail out if set. */
6262 if (test_and_set_bit(0, &(nic->link_state))) {
6263 /* The card is being reset, no point doing anything */
6267 subid = nic->pdev->subsystem_device;
6268 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6270 * Allow a small delay for the NICs self initiated
6271 * cleanup to complete.
6276 val64 = readq(&bar0->adapter_status);
6277 if (LINK_IS_UP(val64)) {
/* First link-up after reset: enable the adapter once it is quiescent,
 * working around boards with faulty link LEDs via GPIO 0. */
6278 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6279 if (verify_xena_quiescence(nic)) {
6280 val64 = readq(&bar0->adapter_control);
6281 val64 |= ADAPTER_CNTL_EN;
6282 writeq(val64, &bar0->adapter_control);
6283 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6284 nic->device_type, subid)) {
6285 val64 = readq(&bar0->gpio_control);
6286 val64 |= GPIO_CTRL_GPIO_0;
6287 writeq(val64, &bar0->gpio_control);
6288 val64 = readq(&bar0->gpio_control);
6290 val64 |= ADAPTER_LED_ON;
6291 writeq(val64, &bar0->adapter_control);
6293 nic->device_enabled_once = TRUE;
6295 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6296 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6297 netif_stop_queue(dev);
6300 val64 = readq(&bar0->adapter_control);
6301 val64 |= ADAPTER_LED_ON;
6302 writeq(val64, &bar0->adapter_control);
6303 s2io_link(nic, LINK_UP);
/* Link down path: undo the GPIO workaround, turn the LED off and report
 * LINK_DOWN to the stack. */
6305 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6307 val64 = readq(&bar0->gpio_control);
6308 val64 &= ~GPIO_CTRL_GPIO_0;
6309 writeq(val64, &bar0->gpio_control);
6310 val64 = readq(&bar0->gpio_control);
6313 val64 = readq(&bar0->adapter_control);
6314 val64 = val64 &(~ADAPTER_LED_ON);
6315 writeq(val64, &bar0->adapter_control);
6316 s2io_link(nic, LINK_DOWN);
6318 clear_bit(0, &(nic->link_state));
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous); the DMA-mapping error unwinding below is
 * order-sensitive, so code is left byte-identical and only comments added.
 * Used by rxd_owner_bit_reset() to (re)attach skbs/DMA mappings to RxDs.
 */
6324 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6326 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6327 u64 *temp2, int size)
6329 struct net_device *dev = sp->dev;
6330 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
/* 1-buffer mode: a single data buffer per RxD. */
6332 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6333 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6336 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6338 * As Rx frame are not going to be processed,
6339 * using same mapped address for the Rxd
6342 rxdp1->Buffer0_ptr = *temp0;
6344 *skb = dev_alloc_skb(size);
6346 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6347 DBG_PRINT(INFO_DBG, "memory to allocate ");
6348 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6349 sp->mac_control.stats_info->sw_stat. \
6350 mem_alloc_fail_cnt++;
6353 sp->mac_control.stats_info->sw_stat.mem_allocated
6354 += (*skb)->truesize;
6355 /* storing the mapped addr in a temp variable
6356 * such it will be used for next rxd whose
6357 * Host Control is NULL
6359 rxdp1->Buffer0_ptr = *temp0 =
6360 pci_map_single( sp->pdev, (*skb)->data,
6361 size - NET_IP_ALIGN,
6362 PCI_DMA_FROMDEVICE);
6363 if( (rxdp1->Buffer0_ptr == 0) ||
6364 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6365 goto memalloc_failed;
6367 rxdp->Host_Control = (unsigned long) (*skb);
/* 3B mode: buffer0 = header, buffer1 = dummy, buffer2 = payload. */
6369 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6370 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6371 /* Two buffer Mode */
6373 rxdp3->Buffer2_ptr = *temp2;
6374 rxdp3->Buffer0_ptr = *temp0;
6375 rxdp3->Buffer1_ptr = *temp1;
6377 *skb = dev_alloc_skb(size);
6379 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6380 DBG_PRINT(INFO_DBG, "memory to allocate ");
6381 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6382 sp->mac_control.stats_info->sw_stat. \
6383 mem_alloc_fail_cnt++;
6386 sp->mac_control.stats_info->sw_stat.mem_allocated
6387 += (*skb)->truesize;
6388 rxdp3->Buffer2_ptr = *temp2 =
6389 pci_map_single(sp->pdev, (*skb)->data,
6391 PCI_DMA_FROMDEVICE);
6392 if( (rxdp3->Buffer2_ptr == 0) ||
6393 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6394 goto memalloc_failed;
6396 rxdp3->Buffer0_ptr = *temp0 =
6397 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6398 PCI_DMA_FROMDEVICE);
/* On a failed map, unmap everything mapped so far before bailing. */
6399 if( (rxdp3->Buffer0_ptr == 0) ||
6400 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6401 pci_unmap_single (sp->pdev,
6402 (dma_addr_t)rxdp3->Buffer2_ptr,
6403 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6404 goto memalloc_failed;
6406 rxdp->Host_Control = (unsigned long) (*skb);
6408 /* Buffer-1 will be dummy buffer not used */
6409 rxdp3->Buffer1_ptr = *temp1 =
6410 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6411 PCI_DMA_FROMDEVICE);
6412 if( (rxdp3->Buffer1_ptr == 0) ||
6413 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6414 pci_unmap_single (sp->pdev,
6415 (dma_addr_t)rxdp3->Buffer0_ptr,
6416 BUF0_LEN, PCI_DMA_FROMDEVICE);
6417 pci_unmap_single (sp->pdev,
6418 (dma_addr_t)rxdp3->Buffer2_ptr,
6419 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6420 goto memalloc_failed;
/* Common failure exit: account the drop and free the fresh skb. */
6426 stats->pci_map_fail_cnt++;
6427 stats->mem_freed += (*skb)->truesize;
6428 dev_kfree_skb(*skb);
6432 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6435 struct net_device *dev = sp->dev;
6436 if (sp->rxd_mode == RXD_MODE_1) {
6437 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6438 } else if (sp->rxd_mode == RXD_MODE_3B) {
6439 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6440 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6441 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous).  Code left byte-identical; comments only.
 * Walks every RxD of every ring, re-attaches buffers and hands ownership
 * back to the hardware (used on card-down to avoid ring bump).
 */
6445 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6447 int i, j, k, blk_cnt = 0, size;
6448 struct mac_info * mac_control = &sp->mac_control;
6449 struct config_param *config = &sp->config;
6450 struct net_device *dev = sp->dev;
6451 struct RxD_t *rxdp = NULL;
6452 struct sk_buff *skb = NULL;
6453 struct buffAdd *ba = NULL;
6454 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6456 /* Calculate the size based on ring mode */
6457 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6458 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6459 if (sp->rxd_mode == RXD_MODE_1)
6460 size += NET_IP_ALIGN;
6461 else if (sp->rxd_mode == RXD_MODE_3B)
6462 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6464 for (i = 0; i < config->rx_ring_num; i++) {
6465 blk_cnt = config->rx_cfg[i].num_rxd /
6466 (rxd_count[sp->rxd_mode] +1);
6468 for (j = 0; j < blk_cnt; j++) {
6469 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6470 rxdp = mac_control->rings[i].
6471 rx_blocks[j].rxds[k].virt_addr;
6472 if(sp->rxd_mode == RXD_MODE_3B)
6473 ba = &mac_control->rings[i].ba[j][k];
6474 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6475 &skb,(u64 *)&temp0_64,
6482 set_rxd_buffer_size(sp, rxdp, size);
6484 /* flip the Ownership bit to Hardware */
6485 rxdp->Control_1 |= RXD_OWN_XENA;
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous).  Code left byte-identical; comments only.
 * Registers interrupt handlers: per-vector for MSI-X (falling back to
 * INTA if MSI-X enable fails), or a single shared handler for INTA.
 */
6493 static int s2io_add_isr(struct s2io_nic * sp)
6496 struct net_device *dev = sp->dev;
6499 if (sp->intr_type == MSI_X)
6500 ret = s2io_enable_msi_x(sp);
/* MSI-X enable failed: silently fall back to legacy INTA. */
6502 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6503 sp->intr_type = INTA;
6506 /* Store the values of the MSIX table in the struct s2io_nic structure */
6507 store_xmsi_data(sp);
6509 /* After proper initialization of H/W, register ISR */
6510 if (sp->intr_type == MSI_X) {
6511 int i, msix_tx_cnt=0,msix_rx_cnt=0;
/* Vector 0 is reserved; iterate marked entries, FIFO (Tx) vs ring (Rx). */
6513 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6514 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6515 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6517 err = request_irq(sp->entries[i].vector,
6518 s2io_msix_fifo_handle, 0, sp->desc[i],
6519 sp->s2io_entries[i].arg);
6520 /* If either data or addr is zero print it */
6521 if(!(sp->msix_info[i].addr &&
6522 sp->msix_info[i].data)) {
6523 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6524 "Data:0x%lx\n",sp->desc[i],
6525 (unsigned long long)
6526 sp->msix_info[i].addr,
6528 ntohl(sp->msix_info[i].data));
6533 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6535 err = request_irq(sp->entries[i].vector,
6536 s2io_msix_ring_handle, 0, sp->desc[i],
6537 sp->s2io_entries[i].arg);
6538 /* If either data or addr is zero print it */
6539 if(!(sp->msix_info[i].addr &&
6540 sp->msix_info[i].data)) {
6541 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6542 "Data:0x%lx\n",sp->desc[i],
6543 (unsigned long long)
6544 sp->msix_info[i].addr,
6546 ntohl(sp->msix_info[i].data));
6552 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6553 "failed\n", dev->name, i);
6554 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6557 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6559 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6560 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
/* Legacy path: one shared interrupt line for the whole device. */
6562 if (sp->intr_type == INTA) {
6563 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6566 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous).  Code left byte-identical; comments only.
 * Mirror of s2io_add_isr(): frees every registered vector (MSI-X) or the
 * shared line (INTA), releases the MSI-X tables and waits for in-flight
 * handlers to drain.
 */
6573 static void s2io_rem_isr(struct s2io_nic * sp)
6576 struct net_device *dev = sp->dev;
6577 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6579 if (sp->intr_type == MSI_X) {
6583 for (i=1; (sp->s2io_entries[i].in_use ==
6584 MSIX_REGISTERED_SUCCESS); i++) {
6585 int vector = sp->entries[i].vector;
6586 void *arg = sp->s2io_entries[i].arg;
6588 free_irq(vector, arg);
6593 (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
6594 kfree(sp->s2io_entries);
6596 (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
6598 sp->s2io_entries = NULL;
/* Clear the MSI enable bit in PCI config space (offset 0x42, bit 0),
 * then disable MSI-X at the PCI layer. */
6600 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6601 msi_control &= 0xFFFE; /* Disable MSI */
6602 pci_write_config_word(sp->pdev, 0x42, msi_control);
6604 pci_disable_msix(sp->pdev);
6606 free_irq(sp->pdev->irq, dev);
6608 /* Waiting till all Interrupt handlers are complete */
6612 if (!atomic_read(&sp->isr_cnt))
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous).  Code left byte-identical; comments only.
 * Takes the card down: stops timers/tasklet, quiesces the hardware and
 * frees all Tx/Rx buffers.  @do_io gates the register accesses.
 */
6618 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6621 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6622 unsigned long flags;
6623 register u64 val64 = 0;
6625 del_timer_sync(&sp->alarm_timer);
6626 /* If s2io_set_link task is executing, wait till it completes. */
6627 while (test_and_set_bit(0, &(sp->link_state))) {
6630 atomic_set(&sp->card_state, CARD_DOWN);
6632 /* disable Tx and Rx traffic on the NIC */
6639 tasklet_kill(&sp->task);
6641 /* Check if the device is Quiescent and then Reset the NIC */
6643 /* As per the HW requirement we need to replenish the
6644 * receive buffer to avoid the ring bump. Since there is
6645 * no intention of processing the Rx frame at this pointwe are
6646 * just settting the ownership bit of rxd in Each Rx
6647 * ring to HW and set the appropriate buffer size
6648 * based on the ring mode
6650 rxd_owner_bit_reset(sp);
6652 val64 = readq(&bar0->adapter_status);
6653 if (verify_xena_quiescence(sp)) {
6654 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6662 "s2io_close:Device not Quiescent ");
6663 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6664 (unsigned long long) val64);
/* Buffers are freed under the respective locks so the ISR/tasklet
 * cannot race with the teardown. */
6671 spin_lock_irqsave(&sp->tx_lock, flags);
6672 /* Free all Tx buffers */
6673 free_tx_buffers(sp);
6674 spin_unlock_irqrestore(&sp->tx_lock, flags);
6676 /* Free all Rx buffers */
6677 spin_lock_irqsave(&sp->rx_lock, flags);
6678 free_rx_buffers(sp);
6679 spin_unlock_irqrestore(&sp->rx_lock, flags);
6681 clear_bit(0, &(sp->link_state));
/**
 * s2io_card_down - Public wrapper for taking the card down.
 * @sp: device private structure.
 *
 * Calls do_s2io_card_down() with do_io=1 so the hardware registers are
 * actually touched during teardown.
 */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous).  Code left byte-identical; comments only.
 * Brings the card up: inits H/W, fills Rx rings, registers the ISR,
 * starts the alarm timer/tasklet and enables interrupts.
 */
6689 static int s2io_card_up(struct s2io_nic * sp)
6692 struct mac_info *mac_control;
6693 struct config_param *config;
6694 struct net_device *dev = (struct net_device *) sp->dev;
6697 /* Initialize the H/W I/O registers */
6698 if (init_nic(sp) != 0) {
6699 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6706 * Initializing the Rx buffers. For now we are considering only 1
6707 * Rx ring and initializing buffers into 30 Rx blocks
6709 mac_control = &sp->mac_control;
6710 config = &sp->config;
6712 for (i = 0; i < config->rx_ring_num; i++) {
6713 if ((ret = fill_rx_buffers(sp, i))) {
6714 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6717 free_rx_buffers(sp);
6720 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6721 atomic_read(&sp->rx_bufs_left[i]));
6723 /* Maintain the state prior to the open */
6724 if (sp->promisc_flg)
6725 sp->promisc_flg = 0;
6726 if (sp->m_cast_flg) {
6728 sp->all_multi_pos= 0;
6731 /* Setting its receive mode */
6732 s2io_set_multicast(dev);
6735 /* Initialize max aggregatable pkts per session based on MTU */
6736 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6737 /* Check if we can use(if specified) user provided value */
6738 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6739 sp->lro_max_aggr_per_sess = lro_max_pkts;
6742 /* Enable Rx Traffic and interrupts on the NIC */
6743 if (start_nic(sp)) {
6744 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6746 free_rx_buffers(sp);
6750 /* Add interrupt service routine */
6751 if (s2io_add_isr(sp) != 0) {
6752 if (sp->intr_type == MSI_X)
6755 free_rx_buffers(sp);
6759 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6761 /* Enable tasklet for the device */
6762 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6764 /* Enable select interrupts */
6765 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
6766 if (sp->intr_type != INTA)
6767 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6769 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6770 interruptible |= TX_PIC_INTR;
6771 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6775 atomic_set(&sp->card_state, CARD_UP);
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous).  Code left byte-identical; comments only.
 */
6780 * s2io_restart_nic - Resets the NIC.
6781 * @data : long pointer to the device private structure
6783 * This function is scheduled to be run by the s2io_tx_watchdog
6784 * function after 0.5 secs to reset the NIC. The idea is to reduce
6785 * the run time of the watch dog routine which is run holding a
6789 static void s2io_restart_nic(struct work_struct *work)
6791 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6792 struct net_device *dev = sp->dev;
6796 if (!netif_running(dev))
/* Cycle the card; on bring-up failure only log — the watchdog may fire
 * again and retry. */
6800 if (s2io_card_up(sp)) {
6801 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6804 netif_wake_queue(dev);
6805 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6812 * s2io_tx_watchdog - Watchdog for transmit side.
6813 * @dev : Pointer to net device structure
6815 * This function is triggered if the Tx Queue is stopped
6816 * for a pre-defined amount of time when the Interface is still up.
6817 * If the Interface is jammed in such a situation, the hardware is
6818 * reset (by s2io_close) and restarted again (by s2io_open) to
6819 * overcome any problem that might have been caused in the hardware.
6824 static void s2io_tx_watchdog(struct net_device *dev)
6826 struct s2io_nic *sp = dev->priv;
6828 if (netif_carrier_ok(dev)) {
6829 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6830 schedule_work(&sp->rst_timer_task);
6831 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
/*
 * NOTE(review): interior lines are missing from this extract (embedded
 * numbering is non-contiguous); the LRO dispatch below is order-sensitive,
 * so code is left byte-identical and only comments added.
 */
6836 * rx_osm_handler - To perform some OS related operations on SKB.
6837 * @sp: private member of the device structure,pointer to s2io_nic structure.
6838 * @skb : the socket buffer pointer.
6839 * @len : length of the packet
6840 * @cksum : FCS checksum of the frame.
6841 * @ring_no : the ring from which this RxD was extracted.
6843 * This function is called by the Rx interrupt serivce routine to perform
6844 * some OS related operations on the SKB before passing it to the upper
6845 * layers. It mainly checks if the checksum is OK, if so adds it to the
6846 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6847 * to the upper layer. If the checksum is wrong, it increments the Rx
6848 * packet error count, frees the SKB and returns error.
6850 * SUCCESS on success and -1 on failure.
6852 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6854 struct s2io_nic *sp = ring_data->nic;
6855 struct net_device *dev = (struct net_device *) sp->dev;
6856 struct sk_buff *skb = (struct sk_buff *)
6857 ((unsigned long) rxdp->Host_Control);
6858 int ring_no = ring_data->ring_no;
6859 u16 l3_csum, l4_csum;
6860 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6867 /* Check for parity error */
6869 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
/* Transfer code is in bits 48+ of Control_1; each value maps to a
 * dedicated software error counter below. */
6871 err_mask = err >> 48;
6874 sp->mac_control.stats_info->sw_stat.
6875 rx_parity_err_cnt++;
6879 sp->mac_control.stats_info->sw_stat.
6884 sp->mac_control.stats_info->sw_stat.
6885 rx_parity_abort_cnt++;
6889 sp->mac_control.stats_info->sw_stat.
6894 sp->mac_control.stats_info->sw_stat.
6899 sp->mac_control.stats_info->sw_stat.
6904 sp->mac_control.stats_info->sw_stat.
6905 rx_buf_size_err_cnt++;
6909 sp->mac_control.stats_info->sw_stat.
6910 rx_rxd_corrupt_cnt++;
6914 sp->mac_control.stats_info->sw_stat.
6919 * Drop the packet if bad transfer code. Exception being
6920 * 0x5, which could be due to unsupported IPv6 extension header.
6921 * In this case, we let stack handle the packet.
6922 * Note that in this case, since checksum will be incorrect,
6923 * stack will validate the same.
6925 if (err_mask != 0x5) {
6926 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
6927 dev->name, err_mask);
6928 sp->stats.rx_crc_errors++;
6929 sp->mac_control.stats_info->sw_stat.mem_freed
6932 atomic_dec(&sp->rx_bufs_left[ring_no]);
6933 rxdp->Host_Control = 0;
6938 /* Updating statistics */
6939 sp->stats.rx_packets++;
6940 rxdp->Host_Control = 0;
6941 if (sp->rxd_mode == RXD_MODE_1) {
6942 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6944 sp->stats.rx_bytes += len;
/* 3B mode: header (buffer0) is copied in front of the payload
 * (buffer2) to rebuild a contiguous frame. */
6947 } else if (sp->rxd_mode == RXD_MODE_3B) {
6948 int get_block = ring_data->rx_curr_get_info.block_index;
6949 int get_off = ring_data->rx_curr_get_info.offset;
6950 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6951 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6952 unsigned char *buff = skb_push(skb, buf0_len);
6954 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6955 sp->stats.rx_bytes += buf0_len + buf2_len;
6956 memcpy(buff, ba->ba_0, buf0_len);
6957 skb_put(skb, buf2_len);
/* LRO path: only for non-fragmented TCP/UDP frames with good HW
 * checksums. */
6960 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6961 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6963 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6964 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6965 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6967 * NIC verifies if the Checksum of the received
6968 * frame is Ok or not and accordingly returns
6969 * a flag in the RxD.
6971 skb->ip_summed = CHECKSUM_UNNECESSARY;
6977 ret = s2io_club_tcp_session(skb->data, &tcp,
6978 &tcp_len, &lro, rxdp, sp);
6980 case 3: /* Begin anew */
6983 case 1: /* Aggregate */
6985 lro_append_pkt(sp, lro,
6989 case 4: /* Flush session */
6991 lro_append_pkt(sp, lro,
6993 queue_rx_frame(lro->parent);
6994 clear_lro_session(lro);
6995 sp->mac_control.stats_info->
6996 sw_stat.flush_max_pkts++;
6999 case 2: /* Flush both */
7000 lro->parent->data_len =
7002 sp->mac_control.stats_info->
7003 sw_stat.sending_both++;
7004 queue_rx_frame(lro->parent);
7005 clear_lro_session(lro);
7007 case 0: /* sessions exceeded */
7008 case -1: /* non-TCP or not
7012 * First pkt in session not
7013 * L3/L4 aggregatable
7018 "%s: Samadhana!!\n",
7025 * Packet with erroneous checksum, let the
7026 * upper layers deal with it.
7028 skb->ip_summed = CHECKSUM_NONE;
7031 skb->ip_summed = CHECKSUM_NONE;
7033 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7035 skb->protocol = eth_type_trans(skb, dev);
7036 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7038 /* Queueing the vlan frame to the upper layer */
7040 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7041 RXD_GET_VLAN_TAG(rxdp->Control_2));
7043 vlan_hwaccel_rx(skb, sp->vlgrp,
7044 RXD_GET_VLAN_TAG(rxdp->Control_2));
7047 netif_receive_skb(skb);
7053 queue_rx_frame(skb);
7055 dev->last_rx = jiffies;
7057 atomic_dec(&sp->rx_bufs_left[ring_no]);
7062 * s2io_link - stops/starts the Tx queue.
7063 * @sp : private member of the device structure, which is a pointer to the
7064 * s2io_nic structure.
7065 * @link : indicates whether link is UP/DOWN.
7067 * This function stops/starts the Tx queue depending on whether the link
7068 * status of the NIC is down or up. This is called by the Alarm
7069 * interrupt handler whenever a link change interrupt comes up.
7074 static void s2io_link(struct s2io_nic * sp, int link)
7076 struct net_device *dev = (struct net_device *) sp->dev;
/* Only act on an actual state transition; repeated notifications of the
 * same state are ignored. */
7078 if (link != sp->last_link_state) {
7079 if (link == LINK_DOWN) {
7080 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7081 netif_carrier_off(dev);
/* Record how long the link had been up before going down — but only
 * if it was ever up (link_up_cnt non-zero). */
7082 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7083 sp->mac_control.stats_info->sw_stat.link_up_time =
7084 jiffies - sp->start_time;
7085 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7087 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
/* Symmetric accounting for the down-to-up transition. */
7088 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7089 sp->mac_control.stats_info->sw_stat.link_down_time =
7090 jiffies - sp->start_time;
7091 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7092 netif_carrier_on(dev);
7095 sp->last_link_state = link;
/* start_time marks the beginning of the new link-state interval. */
7096 sp->start_time = jiffies;
7100 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7101 * @sp : private member of the device structure, which is a pointer to the
7102 * s2io_nic structure.
7104 * This function initializes a few of the PCI and PCI-X configuration registers
7105 * with recommended values.
7110 static void s2io_init_pci(struct s2io_nic * sp)
7112 u16 pci_cmd = 0, pcix_cmd = 0;
7114 /* Enable Data Parity Error Recovery in PCI-X command register. */
7115 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7117 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* NOTE(review): read-back after the write — presumably to flush/verify the
 * posted config write; confirm against hardware errata. */
7119 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7122 /* Set the PErr Response bit in PCI command register. */
7123 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7124 pci_write_config_word(sp->pdev, PCI_COMMAND,
7125 (pci_cmd | PCI_COMMAND_PARITY));
7126 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/*
 * s2io_verify_parm - sanity-check module load parameters.
 * @pdev: PCI device, used to distinguish Xframe I from Xframe II (Herc).
 * @dev_intr_type: in/out requested interrupt type; downgraded to INTA
 * whenever the request cannot be honoured.
 * Clamps out-of-range tx_fifo_num / rx_ring_num / rx_ring_mode values and
 * falls back to INTA for unsupported interrupt configurations.
 */
7129 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7131 if ( tx_fifo_num > 8) {
7132 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7134 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7137 if ( rx_ring_num > 8) {
7138 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7140 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7143 if (*dev_intr_type != INTA)
/* Without CONFIG_PCI_MSI the kernel cannot deliver MSI/MSI-X at all. */
7146 #ifndef CONFIG_PCI_MSI
7147 if (*dev_intr_type != INTA) {
7148 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
7149 "MSI/MSI-X. Defaulting to INTA\n");
7150 *dev_intr_type = INTA;
7153 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7154 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7155 "Defaulting to INTA\n");
7156 *dev_intr_type = INTA;
/* MSI-X requires a Herc (Xframe II) device id; Xframe I gets INTA. */
7159 if ((*dev_intr_type == MSI_X) &&
7160 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7161 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7162 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7163 "Defaulting to INTA\n");
7164 *dev_intr_type = INTA;
/* Only 1-buffer and 2-buffer receive modes are valid. */
7167 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7168 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7169 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7176 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7177 * or Traffic class respectively.
7178 * @nic: device private variable
 * @ds_codepoint: DS codepoint (0-63) whose traffic is being steered.
 * @ring: receive ring that packets with this codepoint should land on.
7179 * Description: The function configures the receive steering to
7180 * desired receive ring.
7181 * Return Value: SUCCESS on success and
7182 * '-1' on failure (endian settings incorrect).
7184 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7186 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7187 register u64 val64 = 0;
/* The DS codepoint field is 6 bits wide; reject anything larger. */
7189 if (ds_codepoint > 63)
/* Load the target ring into the data register, then issue a write
 * command for the given codepoint offset. */
7192 val64 = RTS_DS_MEM_DATA(ring);
7193 writeq(val64, &bar0->rts_ds_mem_data);
7195 val64 = RTS_DS_MEM_CTRL_WE |
7196 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7197 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7199 writeq(val64, &bar0->rts_ds_mem_ctrl);
/* Wait for the hardware to clear the strobe, i.e. accept the command. */
7201 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7202 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7207 * s2io_init_nic - Initialization of the adapter.
7208 * @pdev : structure containing the PCI related information of the device.
7209 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7211 * The function initializes an adapter identified by the pci_dev structure.
7212 * All OS related initialization including memory and device structure and
7213 * initialization of the device private variable is done. Also the swapper
7214 * control register is initialized to enable read and write into the I/O
7215 * registers of the device.
7217 * returns 0 on success and negative on failure.
7220 static int __devinit
7221 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7223 struct s2io_nic *sp;
7224 struct net_device *dev;
7226 int dma_flag = FALSE;
7227 u32 mac_up, mac_down;
7228 u64 val64 = 0, tmp64 = 0;
7229 struct XENA_dev_config __iomem *bar0 = NULL;
7231 struct mac_info *mac_control;
7232 struct config_param *config;
7234 u8 dev_intr_type = intr_type;
/* Validate module parameters first; dev_intr_type may be downgraded. */
7236 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7239 if ((ret = pci_enable_device(pdev))) {
7241 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer a 64-bit DMA mask; fall back to 32-bit, else bail out. */
7245 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7246 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7248 if (pci_set_consistent_dma_mask
7249 (pdev, DMA_64BIT_MASK)) {
7251 "Unable to obtain 64bit DMA for \
7252 consistent allocations\n");
7253 pci_disable_device(pdev);
7256 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7257 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7259 pci_disable_device(pdev);
7262 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7263 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7264 pci_disable_device(pdev);
/* Allocate the net_device with the driver private area appended. */
7268 dev = alloc_etherdev(sizeof(struct s2io_nic));
7270 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7271 pci_disable_device(pdev);
7272 pci_release_regions(pdev);
7276 pci_set_master(pdev);
7277 pci_set_drvdata(pdev, dev);
7278 SET_MODULE_OWNER(dev);
7279 SET_NETDEV_DEV(dev, &pdev->dev);
7281 /* Private member variable initialized to s2io NIC structure */
7283 memset(sp, 0, sizeof(struct s2io_nic));
7286 sp->high_dma_flag = dma_flag;
7287 sp->device_enabled_once = FALSE;
7288 if (rx_ring_mode == 1)
7289 sp->rxd_mode = RXD_MODE_1;
7290 if (rx_ring_mode == 2)
7291 sp->rxd_mode = RXD_MODE_3B;
7293 sp->intr_type = dev_intr_type;
/* Herc device ids mean Xframe II; everything else is Xframe I. */
7295 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7296 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7297 sp->device_type = XFRAME_II_DEVICE;
7299 sp->device_type = XFRAME_I_DEVICE;
7303 /* Initialize some PCI/PCI-X fields of the NIC. */
7307 * Setting the device configuration parameters.
7308 * Most of these parameters can be specified by the user during
7309 * module insertion as they are module loadable parameters. If
7310 * these parameters are not specified during load time, they
7311 * are initialized with default values.
7313 mac_control = &sp->mac_control;
7314 config = &sp->config;
7316 /* Tx side parameters. */
7317 config->tx_fifo_num = tx_fifo_num;
7318 for (i = 0; i < MAX_TX_FIFOS; i++) {
7319 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7320 config->tx_cfg[i].fifo_priority = i;
7323 /* mapping the QoS priority to the configured fifos */
7324 for (i = 0; i < MAX_TX_FIFOS; i++)
7325 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7327 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7328 for (i = 0; i < config->tx_fifo_num; i++) {
7329 config->tx_cfg[i].f_no_snoop =
7330 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
/* Very short FIFOs switch to per-list Tx interrupts. */
7331 if (config->tx_cfg[i].fifo_len < 65) {
7332 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7336 /* + 2 because one Txd for skb->data and one Txd for UFO */
7337 config->max_txds = MAX_SKB_FRAGS + 2;
7339 /* Rx side parameters. */
7340 config->rx_ring_num = rx_ring_num;
7341 for (i = 0; i < MAX_RX_RINGS; i++) {
7342 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7343 (rxd_count[sp->rxd_mode] + 1);
7344 config->rx_cfg[i].ring_priority = i;
7347 for (i = 0; i < rx_ring_num; i++) {
7348 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7349 config->rx_cfg[i].f_no_snoop =
7350 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7353 /* Setting Mac Control parameters */
7354 mac_control->rmac_pause_time = rmac_pause_time;
7355 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7356 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7359 /* Initialize Ring buffer parameters. */
7360 for (i = 0; i < config->rx_ring_num; i++)
7361 atomic_set(&sp->rx_bufs_left[i], 0);
7363 /* Initialize the number of ISRs currently running */
7364 atomic_set(&sp->isr_cnt, 0);
7366 /* initialize the shared memory used by the NIC and the host */
7367 if (init_shared_mem(sp)) {
7368 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7371 goto mem_alloc_failed;
/* Map BAR0 (control registers) and BAR1 (Tx FIFO doorbells). */
7374 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7375 pci_resource_len(pdev, 0));
7377 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7380 goto bar0_remap_failed;
7383 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7384 pci_resource_len(pdev, 2));
7386 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7389 goto bar1_remap_failed;
7392 dev->irq = pdev->irq;
7393 dev->base_addr = (unsigned long) sp->bar0;
7395 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7396 for (j = 0; j < MAX_TX_FIFOS; j++) {
7397 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7398 (sp->bar1 + (j * 0x00020000));
7401 /* Driver entry points */
7402 dev->open = &s2io_open;
7403 dev->stop = &s2io_close;
7404 dev->hard_start_xmit = &s2io_xmit;
7405 dev->get_stats = &s2io_get_stats;
7406 dev->set_multicast_list = &s2io_set_multicast;
7407 dev->do_ioctl = &s2io_ioctl;
7408 dev->change_mtu = &s2io_change_mtu;
7409 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7410 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7411 dev->vlan_rx_register = s2io_vlan_rx_register;
7414 * will use eth_mac_addr() for dev->set_mac_address
7415 * mac address will be set every time dev->open() is called
7417 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7419 #ifdef CONFIG_NET_POLL_CONTROLLER
7420 dev->poll_controller = s2io_netpoll;
/* Offload feature flags advertised to the stack. */
7423 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7424 if (sp->high_dma_flag == TRUE)
7425 dev->features |= NETIF_F_HIGHDMA;
7426 dev->features |= NETIF_F_TSO;
7427 dev->features |= NETIF_F_TSO6;
/* UFO is an Xframe II-only feature, gated by the ufo module param. */
7428 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7429 dev->features |= NETIF_F_UFO;
7430 dev->features |= NETIF_F_HW_CSUM;
7433 dev->tx_timeout = &s2io_tx_watchdog;
7434 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7435 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7436 INIT_WORK(&sp->set_link_task, s2io_set_link);
7438 pci_save_state(sp->pdev);
7440 /* Setting swapper control on the NIC, for proper reset operation */
7441 if (s2io_set_swapper(sp)) {
7442 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7445 goto set_swap_failed;
7448 /* Verify if the Herc works on the slot its placed into */
7449 if (sp->device_type & XFRAME_II_DEVICE) {
7450 mode = s2io_verify_pci_mode(sp);
7452 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7453 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7455 goto set_swap_failed;
7459 /* Not needed for Herc */
7460 if (sp->device_type & XFRAME_I_DEVICE) {
7462 * Fix for all "FFs" MAC address problems observed on
7465 fix_mac_address(sp);
7470 * MAC address initialization.
7471 * For now only one mac address will be read and used.
/* Issue a MAC-address read command and decode the 48-bit result that
 * comes back split across the upper and lower 32 bits. */
7474 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7475 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7476 writeq(val64, &bar0->rmac_addr_cmd_mem);
7477 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7478 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7479 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7480 mac_down = (u32) tmp64;
7481 mac_up = (u32) (tmp64 >> 32);
7483 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7484 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7485 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7486 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7487 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7488 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7490 /* Set the factory defined MAC address initially */
7491 dev->addr_len = ETH_ALEN;
7492 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7494 /* Store the values of the MSIX table in the s2io_nic structure */
7495 store_xmsi_data(sp);
7496 /* reset Nic and bring it to known state */
7500 * Initialize the tasklet status and link state flags
7501 * and the card state parameter
7503 atomic_set(&(sp->card_state), 0);
7504 sp->tasklet_status = 0;
7507 /* Initialize spinlocks */
7508 spin_lock_init(&sp->tx_lock);
7511 spin_lock_init(&sp->put_lock);
7512 spin_lock_init(&sp->rx_lock);
7515 * SXE-002: Configure link and activity LED to init state
7518 subid = sp->pdev->subsystem_device;
7519 if ((subid & 0xFF) >= 0x07) {
7520 val64 = readq(&bar0->gpio_control);
7521 val64 |= 0x0000800000000000ULL;
7522 writeq(val64, &bar0->gpio_control);
7523 val64 = 0x0411040400000000ULL;
7524 writeq(val64, (void __iomem *) bar0 + 0x2700);
7525 val64 = readq(&bar0->gpio_control);
7528 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7530 if (register_netdev(dev)) {
7531 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7533 goto register_failed;
/* Probe-time banner: adapter identity, driver version, MAC, serial. */
7536 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7537 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7538 sp->product_name, pdev->revision);
7539 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7540 s2io_driver_version);
7541 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7542 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7543 sp->def_mac_addr[0].mac_addr[0],
7544 sp->def_mac_addr[0].mac_addr[1],
7545 sp->def_mac_addr[0].mac_addr[2],
7546 sp->def_mac_addr[0].mac_addr[3],
7547 sp->def_mac_addr[0].mac_addr[4],
7548 sp->def_mac_addr[0].mac_addr[5]);
7549 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7550 if (sp->device_type & XFRAME_II_DEVICE) {
7551 mode = s2io_print_pci_mode(sp);
7553 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7555 unregister_netdev(dev);
7556 goto set_swap_failed;
7559 switch(sp->rxd_mode) {
7561 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7565 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7571 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7572 switch(sp->intr_type) {
7574 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7577 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7581 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7584 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7585 " enabled\n", dev->name);
7586 /* Initialize device name */
7587 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7589 /* Initialize bimodal Interrupts */
7590 sp->config.bimodal = bimodal;
7591 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7592 sp->config.bimodal = 0;
7593 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7598 * Make Link state as off at this point, when the Link change
7599 * interrupt comes the state will be automatically changed to
7602 netif_carrier_off(dev);
/* Error unwind path: release resources in reverse order of acquisition. */
7613 free_shared_mem(sp);
7614 pci_disable_device(pdev);
7615 pci_release_regions(pdev);
7616 pci_set_drvdata(pdev, NULL);
7623 * s2io_rem_nic - Free the PCI device
7624 * @pdev: structure containing the PCI related information of the device.
7625 * Description: This function is called by the PCI subsystem to release a
7626 * PCI device and free up all resource held up by the device. This could
7627 * be in response to a Hot plug event or when the driver is to be removed
7631 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7633 struct net_device *dev =
7634 (struct net_device *) pci_get_drvdata(pdev);
7635 struct s2io_nic *sp;
7638 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Make sure no deferred work (reset / link tasks) is still pending. */
7642 flush_scheduled_work();
7645 unregister_netdev(dev);
7647 free_shared_mem(sp);
/* Release PCI resources last, mirroring the probe-time acquisition. */
7650 pci_release_regions(pdev);
7651 pci_set_drvdata(pdev, NULL);
7653 pci_disable_device(pdev);
7657 * s2io_starter - Entry point for the driver
7658 * Description: This function is the entry point for the driver. It verifies
7659 * the module loadable parameters and initializes PCI configuration space.
7662 int __init s2io_starter(void)
7664 return pci_register_driver(&s2io_driver);
7668 * s2io_closer - Cleanup routine for the driver
7669 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
7672 static __exit void s2io_closer(void)
7674 pci_unregister_driver(&s2io_driver);
7675 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Module entry/exit points: s2io_starter runs at load, s2io_closer at unload. */
7678 module_init(s2io_starter);
7679 module_exit(s2io_closer);
/*
 * check_L2_lro_capable - decide whether a received frame's L2 framing allows
 * LRO, and locate its IP and TCP headers.
 * @buffer: start of the received frame.
 * @ip/@tcp: out-pointers set to the parsed L3/L4 headers on success.
 * @rxdp: receive descriptor; Control_1 carries the L2 type and protocol bits.
 * Only TCP frames with DIX (Ethernet II) framing, with or without VLAN, are
 * considered mergeable.
 */
7681 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7682 struct tcphdr **tcp, struct RxD_t *rxdp)
/* Bits 37-39 of Control_1 encode the L2 frame type. */
7685 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7687 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7688 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7694 * By default the VLAN field in the MAC is stripped by the card, if this
7695 * feature is turned off in rx_pa_cfg register, then the ip_off field
7696 * has to be shifted by a further 2 bytes
7699 case 0: /* DIX type */
7700 case 4: /* DIX type with VLAN */
7701 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7703 /* LLC, SNAP etc are considered non-mergeable */
7708 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
/* ihl is the IP header length in 32-bit words — presumably converted to
 * bytes before being added below; confirm against the full source. */
7709 ip_len = (u8)((*ip)->ihl);
7711 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
/*
 * check_for_socket_match - compare an incoming packet's 4-tuple (src/dst IP,
 * src/dst TCP port) against an existing LRO session's stored headers.
 */
7716 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7719 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Any mismatch in addresses or ports means this packet belongs to a
 * different flow than the session. */
7720 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7721 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7726 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7728 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
/*
 * initiate_new_session - start a fresh LRO session for a flow.
 * @lro: free session slot to populate.
 * @l2h: start of the L2 header of the first packet.
 * @ip/@tcp: parsed headers of the first packet.
 * @tcp_pyld_len: TCP payload length of the first packet.
 */
7731 static void initiate_new_session(struct lro *lro, u8 *l2h,
7732 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7734 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Next expected sequence number = this packet's seq + its payload. */
7738 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7739 lro->tcp_ack = ntohl(tcp->ack_seq);
7741 lro->total_len = ntohs(ip->tot_len);
7744 * check if we saw TCP timestamp. Other consistency checks have
7745 * already been done.
/* doff == 8 means exactly one option block beyond the 5-word base
 * header — the timestamp option; cache its tsval/tsecr words. */
7747 if (tcp->doff == 8) {
7749 ptr = (u32 *)(tcp+1);
7751 lro->cur_tsval = *(ptr+1);
7752 lro->cur_tsecr = *(ptr+2);
/*
 * update_L3L4_header - rewrite the aggregate packet's IP/TCP headers before
 * it is handed to the stack, and record aggregation statistics.
 * @sp: device private structure (for sw_stat counters).
 * @lro: session being flushed.
 */
7757 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7759 struct iphdr *ip = lro->iph;
7760 struct tcphdr *tcp = lro->tcph;
7762 struct stat_block *statinfo = sp->mac_control.stats_info;
7763 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7765 /* Update L3 header */
/* Total length now covers all aggregated segments; recompute the IP
 * header checksum accordingly. */
7766 ip->tot_len = htons(lro->total_len);
7768 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7771 /* Update L4 header */
/* Present the most recent ACK and window seen during aggregation. */
7772 tcp->ack_seq = lro->tcp_ack;
7773 tcp->window = lro->window;
7775 /* Update tsecr field if this session has timestamps enabled */
7777 u32 *ptr = (u32 *)(tcp + 1);
7778 *(ptr+2) = lro->cur_tsecr;
7781 /* Update counters required for calculation of
7782 * average no. of packets aggregated.
7784 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7785 statinfo->sw_stat.num_aggregations++;
/*
 * aggregate_new_rx - fold one more in-order TCP segment into an existing
 * LRO session's running totals.
 * @lro: session the packet matched.
 * @ip/@tcp: headers of the new packet.
 * @l4_pyld: TCP payload length of the new packet.
 */
7788 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7789 struct tcphdr *tcp, u32 l4_pyld)
7791 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Grow the logical packet and advance the expected sequence number. */
7792 lro->total_len += l4_pyld;
7793 lro->frags_len += l4_pyld;
7794 lro->tcp_next_seq += l4_pyld;
7797 /* Update ack seq no. and window ad(from this pkt) in LRO object */
7798 lro->tcp_ack = tcp->ack_seq;
7799 lro->window = tcp->window;
7803 /* Update tsecr and tsval from this packet */
7804 ptr = (u32 *) (tcp + 1);
7805 lro->cur_tsval = *(ptr + 1);
7806 lro->cur_tsecr = *(ptr + 2);
/*
 * verify_l3_l4_lro_capable - check whether a TCP segment's L3/L4 contents
 * allow it to be aggregated (no IP options, plain ACK, at most the TCP
 * timestamp option, monotonically increasing tsval).
 * @l_lro: existing session for timestamp monotonicity check, or NULL when
 * validating the first packet of a would-be session.
 */
7810 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7811 struct tcphdr *tcp, u32 tcp_pyld_len)
7815 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7817 if (!tcp_pyld_len) {
7818 /* Runt frame or a pure ack */
7822 if (ip->ihl != 5) /* IP has options */
7825 /* If we see CE codepoint in IP header, packet is not mergeable */
7826 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7829 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7830 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7831 tcp->ece || tcp->cwr || !tcp->ack) {
7833 * Currently recognize only the ack control word and
7834 * any other control field being set would result in
7835 * flushing the LRO session
7841 * Allow only one TCP timestamp option. Don't aggregate if
7842 * any other options are detected.
/* doff 5 = no options, doff 8 = base header + 3 words (timestamp). */
7844 if (tcp->doff != 5 && tcp->doff != 8)
7847 if (tcp->doff == 8) {
7848 ptr = (u8 *)(tcp + 1);
/* Skip NOP padding before the timestamp option kind byte. */
7849 while (*ptr == TCPOPT_NOP)
7851 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7854 /* Ensure timestamp value increases monotonically */
7856 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7859 /* timestamp echo reply should be non-zero */
7860 if (*((u32 *)(ptr+6)) == 0)
/*
 * s2io_club_tcp_session - LRO dispatch for a received TCP segment.
 * Locates or creates an LRO session for the packet and classifies the
 * required action for the caller via the return value:
 * 1 = aggregate into existing session, 2 = flush both session and packet,
 * 3 = new session begun, 4 = session full, flush it, 5 = not aggregatable.
 */
7868 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7869 struct RxD_t *rxdp, struct s2io_nic *sp)
7872 struct tcphdr *tcph;
/* L2-level screening; also yields the IP/TCP header pointers. */
7875 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7877 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7878 ip->saddr, ip->daddr);
7883 tcph = (struct tcphdr *)*tcp;
7884 *tcp_len = get_l4_pyld_length(ip, tcph);
/* First pass: look for an in-use session matching this 4-tuple. */
7885 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7886 struct lro *l_lro = &sp->lro0_n[i];
7887 if (l_lro->in_use) {
7888 if (check_for_socket_match(l_lro, ip, tcph))
7890 /* Sock pair matched */
/* Out-of-order segment: cannot be merged, count and flush. */
7893 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7894 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7895 "0x%x, actual 0x%x\n", __FUNCTION__,
7896 (*lro)->tcp_next_seq,
7899 sp->mac_control.stats_info->
7900 sw_stat.outof_sequence_pkts++;
7905 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7906 ret = 1; /* Aggregate */
7908 ret = 2; /* Flush both */
7914 /* Before searching for available LRO objects,
7915 * check if the pkt is L3/L4 aggregatable. If not
7916 * don't create new LRO session. Just send this
7919 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
/* Second pass: claim a free session slot for a new flow. */
7923 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7924 struct lro *l_lro = &sp->lro0_n[i];
7925 if (!(l_lro->in_use)) {
7927 ret = 3; /* Begin anew */
7933 if (ret == 0) { /* sessions exceeded */
7934 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
/* Act on the classification decided above. */
7942 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7945 update_L3L4_header(sp, *lro);
7948 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
/* Session reached the per-session aggregation limit: finalize it. */
7949 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7950 update_L3L4_header(sp, *lro);
7951 ret = 4; /* Flush the LRO */
7955 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
7963 static void clear_lro_session(struct lro *lro)
7965 static u16 lro_struct_size = sizeof(struct lro);
7967 memset(lro, 0, lro_struct_size);
/*
 * queue_rx_frame - hand a completed receive skb up to the network stack.
 * Sets the protocol field from the Ethernet header before delivery.
 */
7970 static void queue_rx_frame(struct sk_buff *skb)
7972 struct net_device *dev = skb->dev;
7974 skb->protocol = eth_type_trans(skb, dev);
/* Deliver in NAPI context. */
7976 netif_receive_skb(skb);
/*
 * lro_append_pkt - chain a newly received segment's payload onto the LRO
 * session's parent skb via its frag_list, and update length accounting.
 * @sp: device private structure (for the clubbed-frames counter).
 * @lro: session the segment belongs to.
 * @skb: new segment; its headers are stripped before chaining.
 */
7981 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7982 struct sk_buff *skb,
7985 struct sk_buff *first = lro->parent;
7987 first->len += tcp_len;
7988 first->data_len = lro->frags_len;
/* Drop L2/L3/L4 headers so only the TCP payload is appended. */
7989 skb_pull(skb, (skb->len - tcp_len));
/* Append to the existing frag_list, or start it with this skb. */
7990 if (skb_shinfo(first)->frag_list)
7991 lro->last_frag->next = skb;
7993 skb_shinfo(first)->frag_list = skb;
/* Account the chained skb's memory against the parent. */
7994 first->truesize += skb->truesize;
7995 lro->last_frag = skb;
7996 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8001 * s2io_io_error_detected - called when PCI error is detected
8002 * @pdev: Pointer to PCI device
8003 * @state: The current pci connection state
8005 * This function is called after a PCI bus error affecting
8006 * this device has been detected.
8008 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8009 pci_channel_state_t state)
8011 struct net_device *netdev = pci_get_drvdata(pdev);
8012 struct s2io_nic *sp = netdev->priv;
/* Detach first so the stack stops submitting traffic. */
8014 netif_device_detach(netdev);
8016 if (netif_running(netdev)) {
8017 /* Bring down the card, while avoiding PCI I/O */
8018 do_s2io_card_down(sp, 0);
8020 pci_disable_device(pdev);
/* Ask the PCI error recovery core to reset the slot. */
8022 return PCI_ERS_RESULT_NEED_RESET;
8026 * s2io_io_slot_reset - called after the pci bus has been reset.
8027 * @pdev: Pointer to PCI device
8029 * Restart the card from scratch, as if from a cold-boot.
8030 * At this point, the card has experienced a hard reset,
8031 * followed by fixups by BIOS, and has its config space
8032 * set up identically to what it was at cold boot.
8034 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8036 struct net_device *netdev = pci_get_drvdata(pdev);
8037 struct s2io_nic *sp = netdev->priv;
/* Re-enable PCI I/O; without this no further recovery is possible. */
8039 if (pci_enable_device(pdev)) {
8040 printk(KERN_ERR "s2io: "
8041 "Cannot re-enable PCI device after reset.\n");
8042 return PCI_ERS_RESULT_DISCONNECT;
8045 pci_set_master(pdev);
8048 return PCI_ERS_RESULT_RECOVERED;
8052 * s2io_io_resume - called when traffic can start flowing again.
8053 * @pdev: Pointer to PCI device
8055 * This callback is called when the error recovery driver tells
8056 * us that its OK to resume normal operation.
8058 static void s2io_io_resume(struct pci_dev *pdev)
8060 struct net_device *netdev = pci_get_drvdata(pdev);
8061 struct s2io_nic *sp = netdev->priv;
8063 if (netif_running(netdev)) {
8064 if (s2io_card_up(sp)) {
8065 printk(KERN_ERR "s2io: "
8066 "Can't bring device back up after reset.\n");
8070 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8072 printk(KERN_ERR "s2io: "
8073 "Can't resetore mac addr after reset.\n");
8078 netif_device_attach(netdev);
8079 netif_wake_queue(netdev);