1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 * multiq: This parameter used to enable/disable MULTIQUEUE support.
54 * Possible values '1' for enable and '0' for disable. Default is '0'
55 ************************************************************************/
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
76 #include <linux/tcp.h>
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
82 #include <asm/div64.h>
87 #include "s2io-regs.h"
89 #define DRV_VERSION "2.0.26.24"
91 /* S2io Driver name & version. */
92 static char s2io_driver_name[] = "Neterion";
93 static char s2io_driver_version[] = DRV_VERSION;
95 static int rxd_size[2] = {32,48};
96 static int rxd_count[2] = {127,85};
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
109 * Cards with following subsystem_id have a link state indication
110 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
111 * macro below identifies these cards given the subsystem_id.
/*
 * Evaluates to 1 for Xframe I cards whose subsystem id falls in the
 * faulty-link-indicator ranges (600B-600D, 640B-640D), 0 otherwise.
 * Arguments and the whole expansion are parenthesized so the macro is
 * safe inside larger expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
118 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
119 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
121 static inline int is_s2io_card_up(const struct s2io_nic * sp)
123 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 /* Ethtool related variables and Macros. */
127 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128 "Register test\t(offline)",
129 "Eeprom test\t(offline)",
130 "Link test\t(online)",
131 "RLDRAM test\t(offline)",
132 "BIST Test\t(offline)"
135 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
137 {"tmac_data_octets"},
141 {"tmac_pause_ctrl_frms"},
145 {"tmac_any_err_frms"},
146 {"tmac_ttl_less_fb_octets"},
147 {"tmac_vld_ip_octets"},
155 {"rmac_data_octets"},
156 {"rmac_fcs_err_frms"},
158 {"rmac_vld_mcst_frms"},
159 {"rmac_vld_bcst_frms"},
160 {"rmac_in_rng_len_err_frms"},
161 {"rmac_out_rng_len_err_frms"},
163 {"rmac_pause_ctrl_frms"},
164 {"rmac_unsup_ctrl_frms"},
166 {"rmac_accepted_ucst_frms"},
167 {"rmac_accepted_nucst_frms"},
168 {"rmac_discarded_frms"},
169 {"rmac_drop_events"},
170 {"rmac_ttl_less_fb_octets"},
172 {"rmac_usized_frms"},
173 {"rmac_osized_frms"},
175 {"rmac_jabber_frms"},
176 {"rmac_ttl_64_frms"},
177 {"rmac_ttl_65_127_frms"},
178 {"rmac_ttl_128_255_frms"},
179 {"rmac_ttl_256_511_frms"},
180 {"rmac_ttl_512_1023_frms"},
181 {"rmac_ttl_1024_1518_frms"},
189 {"rmac_err_drp_udp"},
190 {"rmac_xgmii_err_sym"},
208 {"rmac_xgmii_data_err_cnt"},
209 {"rmac_xgmii_ctrl_err_cnt"},
210 {"rmac_accepted_ip"},
214 {"new_rd_req_rtry_cnt"},
216 {"wr_rtry_rd_ack_cnt"},
219 {"new_wr_req_rtry_cnt"},
222 {"rd_rtry_wr_ack_cnt"},
232 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
233 {"rmac_ttl_1519_4095_frms"},
234 {"rmac_ttl_4096_8191_frms"},
235 {"rmac_ttl_8192_max_frms"},
236 {"rmac_ttl_gt_max_frms"},
237 {"rmac_osized_alt_frms"},
238 {"rmac_jabber_alt_frms"},
239 {"rmac_gt_max_alt_frms"},
241 {"rmac_len_discard"},
242 {"rmac_fcs_discard"},
245 {"rmac_red_discard"},
246 {"rmac_rts_discard"},
247 {"rmac_ingm_full_discard"},
251 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
252 {"\n DRIVER STATISTICS"},
253 {"single_bit_ecc_errs"},
254 {"double_bit_ecc_errs"},
267 {"alarm_transceiver_temp_high"},
268 {"alarm_transceiver_temp_low"},
269 {"alarm_laser_bias_current_high"},
270 {"alarm_laser_bias_current_low"},
271 {"alarm_laser_output_power_high"},
272 {"alarm_laser_output_power_low"},
273 {"warn_transceiver_temp_high"},
274 {"warn_transceiver_temp_low"},
275 {"warn_laser_bias_current_high"},
276 {"warn_laser_bias_current_low"},
277 {"warn_laser_output_power_high"},
278 {"warn_laser_output_power_low"},
279 {"lro_aggregated_pkts"},
280 {"lro_flush_both_count"},
281 {"lro_out_of_sequence_pkts"},
282 {"lro_flush_due_to_max_pkts"},
283 {"lro_avg_aggr_pkts"},
284 {"mem_alloc_fail_cnt"},
285 {"pci_map_fail_cnt"},
286 {"watchdog_timer_cnt"},
293 {"tx_tcode_buf_abort_cnt"},
294 {"tx_tcode_desc_abort_cnt"},
295 {"tx_tcode_parity_err_cnt"},
296 {"tx_tcode_link_loss_cnt"},
297 {"tx_tcode_list_proc_err_cnt"},
298 {"rx_tcode_parity_err_cnt"},
299 {"rx_tcode_abort_cnt"},
300 {"rx_tcode_parity_abort_cnt"},
301 {"rx_tcode_rda_fail_cnt"},
302 {"rx_tcode_unkn_prot_cnt"},
303 {"rx_tcode_fcs_err_cnt"},
304 {"rx_tcode_buf_size_err_cnt"},
305 {"rx_tcode_rxd_corrupt_cnt"},
306 {"rx_tcode_unkn_err_cnt"},
314 {"mac_tmac_err_cnt"},
315 {"mac_rmac_err_cnt"},
316 {"xgxs_txgxs_err_cnt"},
317 {"xgxs_rxgxs_err_cnt"},
319 {"prc_pcix_err_cnt"},
326 #define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
327 #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
328 #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
330 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
331 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
333 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
334 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
336 #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
337 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
/*
 * S2IO_TIMER_CONF - initialize @timer with @handle/@arg and arm it to
 * fire @exp jiffies from now.  Wrapped in do { } while (0) so the
 * multi-statement body expands safely as a single statement (e.g.
 * under an un-braced if).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)(arg);	\
		mod_timer(&timer, (jiffies + (exp)));	\
	} while (0)
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
348 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
356 static void s2io_vlan_rx_register(struct net_device *dev,
357 struct vlan_group *grp)
360 struct s2io_nic *nic = dev->priv;
361 unsigned long flags[MAX_TX_FIFOS];
362 struct mac_info *mac_control = &nic->mac_control;
363 struct config_param *config = &nic->config;
365 for (i = 0; i < config->tx_fifo_num; i++)
366 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
369 for (i = config->tx_fifo_num - 1; i >= 0; i--)
370 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
374 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
375 static int vlan_strip_flag;
377 /* Unregister the vlan */
378 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
381 struct s2io_nic *nic = dev->priv;
382 unsigned long flags[MAX_TX_FIFOS];
383 struct mac_info *mac_control = &nic->mac_control;
384 struct config_param *config = &nic->config;
386 for (i = 0; i < config->tx_fifo_num; i++)
387 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
390 vlan_group_set_device(nic->vlgrp, vid, NULL);
392 for (i = config->tx_fifo_num - 1; i >= 0; i--)
393 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
398 * Constants to be programmed into the Xena's registers, to configure
403 static const u64 herc_act_dtx_cfg[] = {
405 0x8000051536750000ULL, 0x80000515367500E0ULL,
407 0x8000051536750004ULL, 0x80000515367500E4ULL,
409 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
411 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
413 0x801205150D440000ULL, 0x801205150D4400E0ULL,
415 0x801205150D440004ULL, 0x801205150D4400E4ULL,
417 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
419 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
424 static const u64 xena_dtx_cfg[] = {
426 0x8000051500000000ULL, 0x80000515000000E0ULL,
428 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
430 0x8001051500000000ULL, 0x80010515000000E0ULL,
432 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
434 0x8002051500000000ULL, 0x80020515000000E0ULL,
436 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
441 * Constants for Fixing the MacAddress problem seen mostly on
444 static const u64 fix_mac[] = {
445 0x0060000000000000ULL, 0x0060600000000000ULL,
446 0x0040600000000000ULL, 0x0000600000000000ULL,
447 0x0020600000000000ULL, 0x0060600000000000ULL,
448 0x0020600000000000ULL, 0x0060600000000000ULL,
449 0x0020600000000000ULL, 0x0060600000000000ULL,
450 0x0020600000000000ULL, 0x0060600000000000ULL,
451 0x0020600000000000ULL, 0x0060600000000000ULL,
452 0x0020600000000000ULL, 0x0060600000000000ULL,
453 0x0020600000000000ULL, 0x0060600000000000ULL,
454 0x0020600000000000ULL, 0x0060600000000000ULL,
455 0x0020600000000000ULL, 0x0060600000000000ULL,
456 0x0020600000000000ULL, 0x0060600000000000ULL,
457 0x0020600000000000ULL, 0x0000600000000000ULL,
458 0x0040600000000000ULL, 0x0060600000000000ULL,
462 MODULE_LICENSE("GPL");
463 MODULE_VERSION(DRV_VERSION);
466 /* Module Loadable parameters. */
467 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
468 S2IO_PARM_INT(rx_ring_num, 1);
469 S2IO_PARM_INT(multiq, 0);
470 S2IO_PARM_INT(rx_ring_mode, 1);
471 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
472 S2IO_PARM_INT(rmac_pause_time, 0x100);
473 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
474 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
475 S2IO_PARM_INT(shared_splits, 0);
476 S2IO_PARM_INT(tmac_util_period, 5);
477 S2IO_PARM_INT(rmac_util_period, 5);
478 S2IO_PARM_INT(l3l4hdr_size, 128);
479 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
480 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
481 /* Frequency of Rx desc syncs expressed as power of 2 */
482 S2IO_PARM_INT(rxsync_frequency, 3);
483 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
484 S2IO_PARM_INT(intr_type, 2);
485 /* Large receive offload feature */
486 static unsigned int lro_enable;
487 module_param_named(lro, lro_enable, uint, 0);
489 /* Max pkts to be aggregated by LRO at one time. If not specified,
490 * aggregation happens until we hit max IP pkt size(64K)
492 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
493 S2IO_PARM_INT(indicate_max_pkts, 0);
495 S2IO_PARM_INT(napi, 1);
496 S2IO_PARM_INT(ufo, 0);
497 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
499 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
500 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
501 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
502 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
503 static unsigned int rts_frm_len[MAX_RX_RINGS] =
504 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
506 module_param_array(tx_fifo_len, uint, NULL, 0);
507 module_param_array(rx_ring_sz, uint, NULL, 0);
508 module_param_array(rts_frm_len, uint, NULL, 0);
512 * This table lists all the devices that this driver supports.
514 static struct pci_device_id s2io_tbl[] __devinitdata = {
515 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
516 PCI_ANY_ID, PCI_ANY_ID},
517 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
518 PCI_ANY_ID, PCI_ANY_ID},
519 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
520 PCI_ANY_ID, PCI_ANY_ID},
521 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
522 PCI_ANY_ID, PCI_ANY_ID},
526 MODULE_DEVICE_TABLE(pci, s2io_tbl);
528 static struct pci_error_handlers s2io_err_handler = {
529 .error_detected = s2io_io_error_detected,
530 .slot_reset = s2io_io_slot_reset,
531 .resume = s2io_io_resume,
534 static struct pci_driver s2io_driver = {
536 .id_table = s2io_tbl,
537 .probe = s2io_init_nic,
538 .remove = __devexit_p(s2io_rem_nic),
539 .err_handler = &s2io_err_handler,
542 /* A simplifier macro used both by init and free shared_mem Fns(). */
543 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
545 /* netqueue manipulation helper functions */
546 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
549 if (sp->config.multiq) {
550 for (i = 0; i < sp->config.tx_fifo_num; i++)
551 netif_stop_subqueue(sp->dev, i);
553 for (i = 0; i < sp->config.tx_fifo_num; i++)
554 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
555 netif_stop_queue(sp->dev);
559 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
561 if (sp->config.multiq)
562 netif_stop_subqueue(sp->dev, fifo_no);
564 sp->mac_control.fifos[fifo_no].queue_state =
566 netif_stop_queue(sp->dev);
570 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
573 if (sp->config.multiq) {
574 for (i = 0; i < sp->config.tx_fifo_num; i++)
575 netif_start_subqueue(sp->dev, i);
577 for (i = 0; i < sp->config.tx_fifo_num; i++)
578 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
579 netif_start_queue(sp->dev);
583 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
585 if (sp->config.multiq)
586 netif_start_subqueue(sp->dev, fifo_no);
588 sp->mac_control.fifos[fifo_no].queue_state =
590 netif_start_queue(sp->dev);
594 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
597 if (sp->config.multiq) {
598 for (i = 0; i < sp->config.tx_fifo_num; i++)
599 netif_wake_subqueue(sp->dev, i);
601 for (i = 0; i < sp->config.tx_fifo_num; i++)
602 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
603 netif_wake_queue(sp->dev);
607 static inline void s2io_wake_tx_queue(
608 struct fifo_info *fifo, int cnt, u8 multiq)
612 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
613 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
614 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
615 if (netif_queue_stopped(fifo->dev)) {
616 fifo->queue_state = FIFO_QUEUE_START;
617 netif_wake_queue(fifo->dev);
623 * init_shared_mem - Allocation and Initialization of Memory
624 * @nic: Device private variable.
625 * Description: The function allocates all the memory areas shared
626 * between the NIC and the driver. This includes Tx descriptors,
627 * Rx descriptors and the statistics block.
630 static int init_shared_mem(struct s2io_nic *nic)
633 void *tmp_v_addr, *tmp_v_addr_next;
634 dma_addr_t tmp_p_addr, tmp_p_addr_next;
635 struct RxD_block *pre_rxd_blk = NULL;
637 int lst_size, lst_per_page;
638 struct net_device *dev = nic->dev;
642 struct mac_info *mac_control;
643 struct config_param *config;
644 unsigned long long mem_allocated = 0;
646 mac_control = &nic->mac_control;
647 config = &nic->config;
650 /* Allocation and initialization of TXDLs in FIOFs */
/* First pass: total the requested TxDs across all FIFOs so the
 * aggregate can be validated against the hardware maximum. */
652 for (i = 0; i < config->tx_fifo_num; i++) {
653 size += config->tx_cfg[i].fifo_len;
655 if (size > MAX_AVAILABLE_TXDS) {
656 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
657 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
/* Second pass: validate each individual FIFO length. */
662 for (i = 0; i < config->tx_fifo_num; i++) {
663 size = config->tx_cfg[i].fifo_len;
665 * Legal values are from 2 to 8192
668 DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
669 DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
670 DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
/* lst_size is one TxD list (max_txds descriptors); lst_per_page is
 * how many complete lists fit into a single DMA-coherent page. */
676 lst_size = (sizeof(struct TxD) * config->max_txds);
677 lst_per_page = PAGE_SIZE / lst_size;
679 for (i = 0; i < config->tx_fifo_num; i++) {
680 int fifo_len = config->tx_cfg[i].fifo_len;
681 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
682 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
684 if (!mac_control->fifos[i].list_info) {
686 "Malloc failed for list_info\n");
689 mem_allocated += list_holder_size;
/* Per-FIFO bookkeeping plus page-by-page allocation of the TxD lists. */
691 for (i = 0; i < config->tx_fifo_num; i++) {
692 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
694 mac_control->fifos[i].tx_curr_put_info.offset = 0;
695 mac_control->fifos[i].tx_curr_put_info.fifo_len =
696 config->tx_cfg[i].fifo_len - 1;
697 mac_control->fifos[i].tx_curr_get_info.offset = 0;
698 mac_control->fifos[i].tx_curr_get_info.fifo_len =
699 config->tx_cfg[i].fifo_len - 1;
700 mac_control->fifos[i].fifo_no = i;
701 mac_control->fifos[i].nic = nic;
702 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
703 mac_control->fifos[i].dev = dev;
705 for (j = 0; j < page_num; j++) {
709 tmp_v = pci_alloc_consistent(nic->pdev,
713 "pci_alloc_consistent ");
714 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
717 /* If we got a zero DMA address(can happen on
718 * certain platforms like PPC), reallocate.
719 * Store virtual address of page we don't want,
723 mac_control->zerodma_virt_addr = tmp_v;
725 "%s: Zero DMA address for TxDL. ", dev->name);
727 "Virtual address %p\n", tmp_v);
728 tmp_v = pci_alloc_consistent(nic->pdev,
732 "pci_alloc_consistent ");
733 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
736 mem_allocated += PAGE_SIZE;
/* Slice the page into TxD lists and record each list's virtual
 * and bus addresses in list_info. */
738 while (k < lst_per_page) {
739 int l = (j * lst_per_page) + k;
740 if (l == config->tx_cfg[i].fifo_len)
742 mac_control->fifos[i].list_info[l].list_virt_addr =
743 tmp_v + (k * lst_size);
744 mac_control->fifos[i].list_info[l].list_phy_addr =
745 tmp_p + (k * lst_size);
/* Per-FIFO scratch array used by the UFO (UDP fragmentation
 * offload) transmit path. */
751 for (i = 0; i < config->tx_fifo_num; i++) {
752 size = config->tx_cfg[i].fifo_len;
753 mac_control->fifos[i].ufo_in_band_v
754 = kcalloc(size, sizeof(u64), GFP_KERNEL);
755 if (!mac_control->fifos[i].ufo_in_band_v)
757 mem_allocated += (size * sizeof(u64));
760 /* Allocation and initialization of RXDs in Rings */
762 for (i = 0; i < config->rx_ring_num; i++) {
763 if (config->rx_cfg[i].num_rxd %
764 (rxd_count[nic->rxd_mode] + 1)) {
765 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
766 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
768 DBG_PRINT(ERR_DBG, "RxDs per Block");
771 size += config->rx_cfg[i].num_rxd;
772 mac_control->rings[i].block_count =
773 config->rx_cfg[i].num_rxd /
774 (rxd_count[nic->rxd_mode] + 1 );
775 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
776 mac_control->rings[i].block_count;
778 if (nic->rxd_mode == RXD_MODE_1)
779 size = (size * (sizeof(struct RxD1)));
781 size = (size * (sizeof(struct RxD3)));
783 for (i = 0; i < config->rx_ring_num; i++) {
784 mac_control->rings[i].rx_curr_get_info.block_index = 0;
785 mac_control->rings[i].rx_curr_get_info.offset = 0;
786 mac_control->rings[i].rx_curr_get_info.ring_len =
787 config->rx_cfg[i].num_rxd - 1;
788 mac_control->rings[i].rx_curr_put_info.block_index = 0;
789 mac_control->rings[i].rx_curr_put_info.offset = 0;
790 mac_control->rings[i].rx_curr_put_info.ring_len =
791 config->rx_cfg[i].num_rxd - 1;
792 mac_control->rings[i].nic = nic;
793 mac_control->rings[i].ring_no = i;
794 mac_control->rings[i].lro = lro_enable;
796 blk_cnt = config->rx_cfg[i].num_rxd /
797 (rxd_count[nic->rxd_mode] + 1);
798 /* Allocating all the Rx blocks */
799 for (j = 0; j < blk_cnt; j++) {
800 struct rx_block_info *rx_blocks;
803 rx_blocks = &mac_control->rings[i].rx_blocks[j];
804 size = SIZE_OF_BLOCK; //size is always page size
805 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
807 if (tmp_v_addr == NULL) {
809 * In case of failure, free_shared_mem()
810 * is called, which should free any
811 * memory that was alloced till the
814 rx_blocks->block_virt_addr = tmp_v_addr;
817 mem_allocated += size;
818 memset(tmp_v_addr, 0, size);
819 rx_blocks->block_virt_addr = tmp_v_addr;
820 rx_blocks->block_dma_addr = tmp_p_addr;
821 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
822 rxd_count[nic->rxd_mode],
824 if (!rx_blocks->rxds)
827 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
/* Record the per-RxD virtual/bus addresses inside the block. */
828 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
829 rx_blocks->rxds[l].virt_addr =
830 rx_blocks->block_virt_addr +
831 (rxd_size[nic->rxd_mode] * l);
832 rx_blocks->rxds[l].dma_addr =
833 rx_blocks->block_dma_addr +
834 (rxd_size[nic->rxd_mode] * l);
837 /* Interlinking all Rx Blocks */
838 for (j = 0; j < blk_cnt; j++) {
840 mac_control->rings[i].rx_blocks[j].block_virt_addr;
842 mac_control->rings[i].rx_blocks[(j + 1) %
843 blk_cnt].block_virt_addr;
845 mac_control->rings[i].rx_blocks[j].block_dma_addr;
847 mac_control->rings[i].rx_blocks[(j + 1) %
848 blk_cnt].block_dma_addr;
/* Chain this block to the next one (circularly, via the modulo
 * above) so the NIC can walk the ring of Rx blocks. */
850 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
851 pre_rxd_blk->reserved_2_pNext_RxD_block =
852 (unsigned long) tmp_v_addr_next;
853 pre_rxd_blk->pNext_RxD_Blk_physical =
854 (u64) tmp_p_addr_next;
857 if (nic->rxd_mode == RXD_MODE_3B) {
859 * Allocation of Storages for buffer addresses in 2BUFF mode
860 * and the buffers as well.
862 for (i = 0; i < config->rx_ring_num; i++) {
863 blk_cnt = config->rx_cfg[i].num_rxd /
864 (rxd_count[nic->rxd_mode]+ 1);
865 mac_control->rings[i].ba =
866 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
868 if (!mac_control->rings[i].ba)
870 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
871 for (j = 0; j < blk_cnt; j++) {
873 mac_control->rings[i].ba[j] =
874 kmalloc((sizeof(struct buffAdd) *
875 (rxd_count[nic->rxd_mode] + 1)),
877 if (!mac_control->rings[i].ba[j])
879 mem_allocated += (sizeof(struct buffAdd) * \
880 (rxd_count[nic->rxd_mode] + 1));
881 while (k != rxd_count[nic->rxd_mode]) {
882 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE and round the returned
 * pointer up so ba_0/ba_1 are suitably aligned. */
884 ba->ba_0_org = (void *) kmalloc
885 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
889 (BUF0_LEN + ALIGN_SIZE);
890 tmp = (unsigned long)ba->ba_0_org;
892 tmp &= ~((unsigned long) ALIGN_SIZE);
893 ba->ba_0 = (void *) tmp;
895 ba->ba_1_org = (void *) kmalloc
896 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
900 += (BUF1_LEN + ALIGN_SIZE);
901 tmp = (unsigned long) ba->ba_1_org;
903 tmp &= ~((unsigned long) ALIGN_SIZE);
904 ba->ba_1 = (void *) tmp;
911 /* Allocation and initialization of Statistics block */
912 size = sizeof(struct stat_block);
913 mac_control->stats_mem = pci_alloc_consistent
914 (nic->pdev, size, &mac_control->stats_mem_phy);
916 if (!mac_control->stats_mem) {
918 * In case of failure, free_shared_mem() is called, which
919 * should free any memory that was alloced till the
924 mem_allocated += size;
925 mac_control->stats_mem_sz = size;
927 tmp_v_addr = mac_control->stats_mem;
928 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
929 memset(tmp_v_addr, 0, size);
930 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
931 (unsigned long long) tmp_p_addr);
/* Account everything allocated here in the driver's software stats. */
932 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
937 * free_shared_mem - Free the allocated Memory
938 * @nic: Device private variable.
939 * Description: This function is to free all memory locations allocated by
940 * the init_shared_mem() function and return it to the kernel.
943 static void free_shared_mem(struct s2io_nic *nic)
945 int i, j, blk_cnt, size;
947 dma_addr_t tmp_p_addr;
948 struct mac_info *mac_control;
949 struct config_param *config;
950 int lst_size, lst_per_page;
951 struct net_device *dev;
959 mac_control = &nic->mac_control;
960 config = &nic->config;
962 lst_size = (sizeof(struct TxD) * config->max_txds);
963 lst_per_page = PAGE_SIZE / lst_size;
/* Free the per-FIFO TxD list pages; each was a single DMA-coherent
 * page holding lst_per_page lists (mirrors init_shared_mem()). */
965 for (i = 0; i < config->tx_fifo_num; i++) {
966 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
968 for (j = 0; j < page_num; j++) {
969 int mem_blks = (j * lst_per_page);
970 if (!mac_control->fifos[i].list_info)
972 if (!mac_control->fifos[i].list_info[mem_blks].
975 pci_free_consistent(nic->pdev, PAGE_SIZE,
976 mac_control->fifos[i].
979 mac_control->fifos[i].
982 nic->mac_control.stats_info->sw_stat.mem_freed
985 /* If we got a zero DMA address during allocation,
988 if (mac_control->zerodma_virt_addr) {
989 pci_free_consistent(nic->pdev, PAGE_SIZE,
990 mac_control->zerodma_virt_addr,
993 "%s: Freeing TxDL with zero DMA addr. ",
995 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
996 mac_control->zerodma_virt_addr);
997 nic->mac_control.stats_info->sw_stat.mem_freed
1000 kfree(mac_control->fifos[i].list_info);
1001 nic->mac_control.stats_info->sw_stat.mem_freed +=
1002 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
/* Release the Rx block pages and the per-block rxds arrays. */
1005 size = SIZE_OF_BLOCK;
1006 for (i = 0; i < config->rx_ring_num; i++) {
1007 blk_cnt = mac_control->rings[i].block_count;
1008 for (j = 0; j < blk_cnt; j++) {
1009 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
1011 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
1013 if (tmp_v_addr == NULL)
1015 pci_free_consistent(nic->pdev, size,
1016 tmp_v_addr, tmp_p_addr);
1017 nic->mac_control.stats_info->sw_stat.mem_freed += size;
1018 kfree(mac_control->rings[i].rx_blocks[j].rxds);
1019 nic->mac_control.stats_info->sw_stat.mem_freed +=
1020 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
1024 if (nic->rxd_mode == RXD_MODE_3B) {
1025 /* Freeing buffer storage addresses in 2BUFF mode. */
1026 for (i = 0; i < config->rx_ring_num; i++) {
1027 blk_cnt = config->rx_cfg[i].num_rxd /
1028 (rxd_count[nic->rxd_mode] + 1);
1029 for (j = 0; j < blk_cnt; j++) {
1031 if (!mac_control->rings[i].ba[j])
1033 while (k != rxd_count[nic->rxd_mode]) {
1034 struct buffAdd *ba =
1035 &mac_control->rings[i].ba[j][k];
/* ba_0_org/ba_1_org are the original (unaligned)
 * kmalloc pointers — those are what must be freed. */
1036 kfree(ba->ba_0_org);
1037 nic->mac_control.stats_info->sw_stat.\
1038 mem_freed += (BUF0_LEN + ALIGN_SIZE);
1039 kfree(ba->ba_1_org);
1040 nic->mac_control.stats_info->sw_stat.\
1041 mem_freed += (BUF1_LEN + ALIGN_SIZE);
1044 kfree(mac_control->rings[i].ba[j]);
1045 nic->mac_control.stats_info->sw_stat.mem_freed +=
1046 (sizeof(struct buffAdd) *
1047 (rxd_count[nic->rxd_mode] + 1));
1049 kfree(mac_control->rings[i].ba);
1050 nic->mac_control.stats_info->sw_stat.mem_freed +=
1051 (sizeof(struct buffAdd *) * blk_cnt);
/* Free the per-FIFO UFO scratch arrays. */
1055 for (i = 0; i < nic->config.tx_fifo_num; i++) {
1056 if (mac_control->fifos[i].ufo_in_band_v) {
1057 nic->mac_control.stats_info->sw_stat.mem_freed
1058 += (config->tx_cfg[i].fifo_len * sizeof(u64));
1059 kfree(mac_control->fifos[i].ufo_in_band_v);
/* Finally release the statistics block itself. */
1063 if (mac_control->stats_mem) {
1064 nic->mac_control.stats_info->sw_stat.mem_freed +=
1065 mac_control->stats_mem_sz;
1066 pci_free_consistent(nic->pdev,
1067 mac_control->stats_mem_sz,
1068 mac_control->stats_mem,
1069 mac_control->stats_mem_phy);
1074 * s2io_verify_pci_mode -
1077 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1079 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1080 register u64 val64 = 0;
1083 val64 = readq(&bar0->pci_mode);
1084 mode = (u8)GET_PCI_MODE(val64);
1086 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1087 return -1; /* Unknown PCI mode */
1091 #define NEC_VENID 0x1033
1092 #define NEC_DEVID 0x0125
1093 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1095 struct pci_dev *tdev = NULL;
1096 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1097 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1098 if (tdev->bus == s2io_pdev->bus->parent) {
1107 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1109 * s2io_print_pci_mode -
1111 static int s2io_print_pci_mode(struct s2io_nic *nic)
1113 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1114 register u64 val64 = 0;
1116 struct config_param *config = &nic->config;
1118 val64 = readq(&bar0->pci_mode);
1119 mode = (u8)GET_PCI_MODE(val64);
1121 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1122 return -1; /* Unknown PCI mode */
1124 config->bus_speed = bus_speed[mode];
1126 if (s2io_on_nec_bridge(nic->pdev)) {
1127 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1132 if (val64 & PCI_MODE_32_BITS) {
1133 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1135 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1139 case PCI_MODE_PCI_33:
1140 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1142 case PCI_MODE_PCI_66:
1143 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1145 case PCI_MODE_PCIX_M1_66:
1146 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1148 case PCI_MODE_PCIX_M1_100:
1149 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1151 case PCI_MODE_PCIX_M1_133:
1152 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1154 case PCI_MODE_PCIX_M2_66:
1155 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1157 case PCI_MODE_PCIX_M2_100:
1158 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1160 case PCI_MODE_PCIX_M2_133:
1161 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1164 return -1; /* Unsupported bus speed */
1171 * init_tti - Initialization transmit traffic interrupt scheme
1172 * @nic: device private variable
1173 * @link: link status (UP/DOWN) used to enable/disable continuous
1174 * transmit interrupts
1175 * Description: The function configures transmit traffic interrupts
1176 * Return Value: SUCCESS on success and
1180 static int init_tti(struct s2io_nic *nic, int link)
1182 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1183 register u64 val64 = 0;
1185 struct config_param *config;
1187 config = &nic->config;
/* Program one TTI (transmit traffic interrupt) entry per configured Tx FIFO. */
1189 for (i = 0; i < config->tx_fifo_num; i++) {
1191 * TTI Initialization. Default Tx timer gets us about
1192 * 250 interrupts per sec. Continuous interrupts are enabled
/* Xframe-II scales the Tx timer with the detected bus speed; other devices use a fixed timer value. */
1195 if (nic->device_type == XFRAME_II_DEVICE) {
1196 int count = (nic->config.bus_speed * 125)/2;
1197 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1199 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1201 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1202 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1203 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1204 TTI_DATA1_MEM_TX_TIMER_AC_EN;
/* Continuous interrupts are only turned on when the link is up. */
1206 if (use_continuous_tx_intrs && (link == LINK_UP))
1207 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1208 writeq(val64, &bar0->tti_data1_mem);
/* Utilization frame counts (UFC A-D): MSI-X gets larger thresholds than INTA/MSI. */
1210 if (nic->config.intr_type == MSI_X) {
1211 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1212 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1213 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1214 TTI_DATA2_MEM_TX_UFC_D(0x300);
/* FIFOs in the UDP range get dedicated thresholds under default Tx steering. */
1216 if ((nic->config.tx_steering_type ==
1217 TX_DEFAULT_STEERING) &&
1218 (config->tx_fifo_num > 1) &&
1219 (i >= nic->udp_fifo_idx) &&
1220 (i < (nic->udp_fifo_idx +
1221 nic->total_udp_fifos)))
1222 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1223 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1224 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1225 TTI_DATA2_MEM_TX_UFC_D(0x120);
1227 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1228 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1229 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1230 TTI_DATA2_MEM_TX_UFC_D(0x80);
1233 writeq(val64, &bar0->tti_data2_mem);
/* Latch the entry for FIFO i and wait for the strobe bit to self-clear. */
1235 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
1236 TTI_CMD_MEM_OFFSET(i);
1237 writeq(val64, &bar0->tti_command_mem);
1239 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1240 TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
1248 * init_nic - Initialization of hardware
1249 * @nic: device private variable
1250 * Description: The function sequentially configures every block
1251 * of the H/W from their reset values.
1252 * Return Value: SUCCESS on success and
1253 * '-1' on failure (endian settings incorrect).
1256 static int init_nic(struct s2io_nic *nic)
1258 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1259 struct net_device *dev = nic->dev;
1260 register u64 val64 = 0;
1264 struct mac_info *mac_control;
1265 struct config_param *config;
1267 unsigned long long mem_share;
1270 mac_control = &nic->mac_control;
1271 config = &nic->config;
1273 /* to set the swapper control on the card */
1274 if(s2io_set_swapper(nic)) {
1275 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1280 * Herc requires EOI to be removed from reset before XGXS, so..
1282 if (nic->device_type & XFRAME_II_DEVICE) {
1283 val64 = 0xA500000000ULL;
1284 writeq(val64, &bar0->sw_reset);
/* Read back to flush the posted sw_reset write before proceeding. */
1286 val64 = readq(&bar0->sw_reset);
1289 /* Remove XGXS from reset state */
1291 writeq(val64, &bar0->sw_reset);
1293 val64 = readq(&bar0->sw_reset);
1295 /* Ensure that it's safe to access registers by checking
1296 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1298 if (nic->device_type == XFRAME_II_DEVICE) {
1299 for (i = 0; i < 50; i++) {
1300 val64 = readq(&bar0->adapter_status);
1301 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1309 /* Enable Receiving broadcasts */
/* mac_cfg is key-protected: each 32-bit half-write must be preceded by the 0x4C0D unlock key. */
1310 add = &bar0->mac_cfg;
1311 val64 = readq(&bar0->mac_cfg);
1312 val64 |= MAC_RMAC_BCAST_ENABLE;
1313 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1314 writel((u32) val64, add);
1315 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1316 writel((u32) (val64 >> 32), (add + 4));
1318 /* Read registers in all blocks */
1319 val64 = readq(&bar0->mac_int_mask);
1320 val64 = readq(&bar0->mc_int_mask);
1321 val64 = readq(&bar0->xgxs_int_mask);
1325 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
/* DTX (transceiver) programming differs between Xframe-II (herc table) and Xframe-I (xena table). */
1327 if (nic->device_type & XFRAME_II_DEVICE) {
1328 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1329 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1330 &bar0->dtx_control, UF);
1332 msleep(1); /* Necessary!! */
1336 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1337 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1338 &bar0->dtx_control, UF);
1339 val64 = readq(&bar0->dtx_control);
1344 /* Tx DMA Initialization */
1346 writeq(val64, &bar0->tx_fifo_partition_0);
1347 writeq(val64, &bar0->tx_fifo_partition_1);
1348 writeq(val64, &bar0->tx_fifo_partition_2);
1349 writeq(val64, &bar0->tx_fifo_partition_3);
/* Pack each FIFO's length and priority into the partition registers (two FIFOs per 64-bit register). */
1352 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1354 vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1355 13) | vBIT(config->tx_cfg[i].fifo_priority,
1358 if (i == (config->tx_fifo_num - 1)) {
1365 writeq(val64, &bar0->tx_fifo_partition_0);
1370 writeq(val64, &bar0->tx_fifo_partition_1);
1375 writeq(val64, &bar0->tx_fifo_partition_2);
1380 writeq(val64, &bar0->tx_fifo_partition_3);
1391 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1392 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1394 if ((nic->device_type == XFRAME_I_DEVICE) &&
1395 (nic->pdev->revision < 4))
1396 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1398 val64 = readq(&bar0->tx_fifo_partition_0);
1399 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1400 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1403 * Initialization of Tx_PA_CONFIG register to ignore packet
1404 * integrity checking.
1406 val64 = readq(&bar0->tx_pa_cfg);
1407 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1408 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1409 writeq(val64, &bar0->tx_pa_cfg);
1411 /* Rx DMA initialization. */
1413 for (i = 0; i < config->rx_ring_num; i++) {
1415 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1418 writeq(val64, &bar0->rx_queue_priority);
1421 * Allocating equal share of memory to all the
/* Xframe-II has a larger on-chip Rx buffer pool than Xframe-I. */
1425 if (nic->device_type & XFRAME_II_DEVICE)
/* Queue 0 absorbs the division remainder so the full pool is used. */
1430 for (i = 0; i < config->rx_ring_num; i++) {
1433 mem_share = (mem_size / config->rx_ring_num +
1434 mem_size % config->rx_ring_num);
1435 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1438 mem_share = (mem_size / config->rx_ring_num);
1439 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1442 mem_share = (mem_size / config->rx_ring_num);
1443 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1446 mem_share = (mem_size / config->rx_ring_num);
1447 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1450 mem_share = (mem_size / config->rx_ring_num);
1451 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1454 mem_share = (mem_size / config->rx_ring_num);
1455 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1458 mem_share = (mem_size / config->rx_ring_num);
1459 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1462 mem_share = (mem_size / config->rx_ring_num);
1463 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1467 writeq(val64, &bar0->rx_queue_cfg);
1470 * Filling Tx round robin registers
1471 * as per the number of FIFOs for equal scheduling priority
/* Each byte of the five round-robin registers names the next FIFO to service; patterns below give equal weight to N FIFOs. */
1473 switch (config->tx_fifo_num) {
1476 writeq(val64, &bar0->tx_w_round_robin_0);
1477 writeq(val64, &bar0->tx_w_round_robin_1);
1478 writeq(val64, &bar0->tx_w_round_robin_2);
1479 writeq(val64, &bar0->tx_w_round_robin_3);
1480 writeq(val64, &bar0->tx_w_round_robin_4);
1483 val64 = 0x0001000100010001ULL;
1484 writeq(val64, &bar0->tx_w_round_robin_0);
1485 writeq(val64, &bar0->tx_w_round_robin_1);
1486 writeq(val64, &bar0->tx_w_round_robin_2);
1487 writeq(val64, &bar0->tx_w_round_robin_3);
1488 val64 = 0x0001000100000000ULL;
1489 writeq(val64, &bar0->tx_w_round_robin_4);
1492 val64 = 0x0001020001020001ULL;
1493 writeq(val64, &bar0->tx_w_round_robin_0);
1494 val64 = 0x0200010200010200ULL;
1495 writeq(val64, &bar0->tx_w_round_robin_1);
1496 val64 = 0x0102000102000102ULL;
1497 writeq(val64, &bar0->tx_w_round_robin_2);
1498 val64 = 0x0001020001020001ULL;
1499 writeq(val64, &bar0->tx_w_round_robin_3);
1500 val64 = 0x0200010200000000ULL;
1501 writeq(val64, &bar0->tx_w_round_robin_4);
1504 val64 = 0x0001020300010203ULL;
1505 writeq(val64, &bar0->tx_w_round_robin_0);
1506 writeq(val64, &bar0->tx_w_round_robin_1);
1507 writeq(val64, &bar0->tx_w_round_robin_2);
1508 writeq(val64, &bar0->tx_w_round_robin_3);
1509 val64 = 0x0001020300000000ULL;
1510 writeq(val64, &bar0->tx_w_round_robin_4);
1513 val64 = 0x0001020304000102ULL;
1514 writeq(val64, &bar0->tx_w_round_robin_0);
1515 val64 = 0x0304000102030400ULL;
1516 writeq(val64, &bar0->tx_w_round_robin_1);
1517 val64 = 0x0102030400010203ULL;
1518 writeq(val64, &bar0->tx_w_round_robin_2);
1519 val64 = 0x0400010203040001ULL;
1520 writeq(val64, &bar0->tx_w_round_robin_3);
1521 val64 = 0x0203040000000000ULL;
1522 writeq(val64, &bar0->tx_w_round_robin_4);
1525 val64 = 0x0001020304050001ULL;
1526 writeq(val64, &bar0->tx_w_round_robin_0);
1527 val64 = 0x0203040500010203ULL;
1528 writeq(val64, &bar0->tx_w_round_robin_1);
1529 val64 = 0x0405000102030405ULL;
1530 writeq(val64, &bar0->tx_w_round_robin_2);
1531 val64 = 0x0001020304050001ULL;
1532 writeq(val64, &bar0->tx_w_round_robin_3);
1533 val64 = 0x0203040500000000ULL;
1534 writeq(val64, &bar0->tx_w_round_robin_4);
1537 val64 = 0x0001020304050600ULL;
1538 writeq(val64, &bar0->tx_w_round_robin_0);
1539 val64 = 0x0102030405060001ULL;
1540 writeq(val64, &bar0->tx_w_round_robin_1);
1541 val64 = 0x0203040506000102ULL;
1542 writeq(val64, &bar0->tx_w_round_robin_2);
1543 val64 = 0x0304050600010203ULL;
1544 writeq(val64, &bar0->tx_w_round_robin_3);
1545 val64 = 0x0405060000000000ULL;
1546 writeq(val64, &bar0->tx_w_round_robin_4);
1549 val64 = 0x0001020304050607ULL;
1550 writeq(val64, &bar0->tx_w_round_robin_0);
1551 writeq(val64, &bar0->tx_w_round_robin_1);
1552 writeq(val64, &bar0->tx_w_round_robin_2);
1553 writeq(val64, &bar0->tx_w_round_robin_3);
1554 val64 = 0x0001020300000000ULL;
1555 writeq(val64, &bar0->tx_w_round_robin_4);
1559 /* Enable all configured Tx FIFO partitions */
1560 val64 = readq(&bar0->tx_fifo_partition_0);
1561 val64 |= (TX_FIFO_PARTITION_EN);
1562 writeq(val64, &bar0->tx_fifo_partition_0);
1564 /* Filling the Rx round robin registers as per the
1565 * number of Rings and steering based on QoS with
1568 switch (config->rx_ring_num) {
1571 writeq(val64, &bar0->rx_w_round_robin_0);
1572 writeq(val64, &bar0->rx_w_round_robin_1);
1573 writeq(val64, &bar0->rx_w_round_robin_2);
1574 writeq(val64, &bar0->rx_w_round_robin_3);
1575 writeq(val64, &bar0->rx_w_round_robin_4);
1577 val64 = 0x8080808080808080ULL;
1578 writeq(val64, &bar0->rts_qos_steering);
1581 val64 = 0x0001000100010001ULL;
1582 writeq(val64, &bar0->rx_w_round_robin_0);
1583 writeq(val64, &bar0->rx_w_round_robin_1);
1584 writeq(val64, &bar0->rx_w_round_robin_2);
1585 writeq(val64, &bar0->rx_w_round_robin_3);
1586 val64 = 0x0001000100000000ULL;
1587 writeq(val64, &bar0->rx_w_round_robin_4);
1589 val64 = 0x8080808040404040ULL;
1590 writeq(val64, &bar0->rts_qos_steering);
1593 val64 = 0x0001020001020001ULL;
1594 writeq(val64, &bar0->rx_w_round_robin_0);
1595 val64 = 0x0200010200010200ULL;
1596 writeq(val64, &bar0->rx_w_round_robin_1);
1597 val64 = 0x0102000102000102ULL;
1598 writeq(val64, &bar0->rx_w_round_robin_2);
1599 val64 = 0x0001020001020001ULL;
1600 writeq(val64, &bar0->rx_w_round_robin_3);
1601 val64 = 0x0200010200000000ULL;
1602 writeq(val64, &bar0->rx_w_round_robin_4);
1604 val64 = 0x8080804040402020ULL;
1605 writeq(val64, &bar0->rts_qos_steering);
1608 val64 = 0x0001020300010203ULL;
1609 writeq(val64, &bar0->rx_w_round_robin_0);
1610 writeq(val64, &bar0->rx_w_round_robin_1);
1611 writeq(val64, &bar0->rx_w_round_robin_2);
1612 writeq(val64, &bar0->rx_w_round_robin_3);
1613 val64 = 0x0001020300000000ULL;
1614 writeq(val64, &bar0->rx_w_round_robin_4);
1616 val64 = 0x8080404020201010ULL;
1617 writeq(val64, &bar0->rts_qos_steering);
1620 val64 = 0x0001020304000102ULL;
1621 writeq(val64, &bar0->rx_w_round_robin_0);
1622 val64 = 0x0304000102030400ULL;
1623 writeq(val64, &bar0->rx_w_round_robin_1);
1624 val64 = 0x0102030400010203ULL;
1625 writeq(val64, &bar0->rx_w_round_robin_2);
1626 val64 = 0x0400010203040001ULL;
1627 writeq(val64, &bar0->rx_w_round_robin_3);
1628 val64 = 0x0203040000000000ULL;
1629 writeq(val64, &bar0->rx_w_round_robin_4);
1631 val64 = 0x8080404020201008ULL;
1632 writeq(val64, &bar0->rts_qos_steering);
1635 val64 = 0x0001020304050001ULL;
1636 writeq(val64, &bar0->rx_w_round_robin_0);
1637 val64 = 0x0203040500010203ULL;
1638 writeq(val64, &bar0->rx_w_round_robin_1);
1639 val64 = 0x0405000102030405ULL;
1640 writeq(val64, &bar0->rx_w_round_robin_2);
1641 val64 = 0x0001020304050001ULL;
1642 writeq(val64, &bar0->rx_w_round_robin_3);
1643 val64 = 0x0203040500000000ULL;
1644 writeq(val64, &bar0->rx_w_round_robin_4);
1646 val64 = 0x8080404020100804ULL;
1647 writeq(val64, &bar0->rts_qos_steering);
1650 val64 = 0x0001020304050600ULL;
1651 writeq(val64, &bar0->rx_w_round_robin_0);
1652 val64 = 0x0102030405060001ULL;
1653 writeq(val64, &bar0->rx_w_round_robin_1);
1654 val64 = 0x0203040506000102ULL;
1655 writeq(val64, &bar0->rx_w_round_robin_2);
1656 val64 = 0x0304050600010203ULL;
1657 writeq(val64, &bar0->rx_w_round_robin_3);
1658 val64 = 0x0405060000000000ULL;
1659 writeq(val64, &bar0->rx_w_round_robin_4);
1661 val64 = 0x8080402010080402ULL;
1662 writeq(val64, &bar0->rts_qos_steering);
1665 val64 = 0x0001020304050607ULL;
1666 writeq(val64, &bar0->rx_w_round_robin_0);
1667 writeq(val64, &bar0->rx_w_round_robin_1);
1668 writeq(val64, &bar0->rx_w_round_robin_2);
1669 writeq(val64, &bar0->rx_w_round_robin_3);
1670 val64 = 0x0001020300000000ULL;
1671 writeq(val64, &bar0->rx_w_round_robin_4);
1673 val64 = 0x8040201008040201ULL;
1674 writeq(val64, &bar0->rts_qos_steering);
/* Reset all 8 frame-length steering entries before programming them. */
1680 for (i = 0; i < 8; i++)
1681 writeq(val64, &bar0->rts_frm_len_n[i]);
1683 /* Set the default rts frame length for the rings configured */
/* MTU + 22 covers the L2 header/trailer overhead on top of the payload. */
1684 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1685 for (i = 0 ; i < config->rx_ring_num ; i++)
1686 writeq(val64, &bar0->rts_frm_len_n[i]);
1688 /* Set the frame length for the configured rings
1689 * desired by the user
1691 for (i = 0; i < config->rx_ring_num; i++) {
1692 /* If rts_frm_len[i] == 0 then it is assumed that user not
1693 * specified frame length steering.
1694 * If the user provides the frame length then program
1695 * the rts_frm_len register for those values or else
1696 * leave it as it is.
1698 if (rts_frm_len[i] != 0) {
1699 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1700 &bar0->rts_frm_len_n[i]);
1704 /* Disable differentiated services steering logic */
1705 for (i = 0; i < 64; i++) {
1706 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1707 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1709 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1714 /* Program statistics memory */
1715 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1717 if (nic->device_type == XFRAME_II_DEVICE) {
1718 val64 = STAT_BC(0x320);
1719 writeq(val64, &bar0->stat_byte_cnt);
1723 * Initializing the sampling rate for the device to calculate the
1724 * bandwidth utilization.
1726 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1727 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1728 writeq(val64, &bar0->mac_link_util);
1731 * Initializing the Transmit and Receive Traffic Interrupt
1735 /* Initialize TTI */
1736 if (SUCCESS != init_tti(nic, nic->last_link_state))
1739 /* RTI Initialization */
1740 if (nic->device_type == XFRAME_II_DEVICE) {
1742 * Programmed to generate Apprx 500 Intrs per
1745 int count = (nic->config.bus_speed * 125)/4;
1746 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1748 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1749 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1750 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1751 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1753 writeq(val64, &bar0->rti_data1_mem);
1755 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1756 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
/* MSI-X uses lower C/D frame-count thresholds than INTA/MSI. */
1757 if (nic->config.intr_type == MSI_X)
1758 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1759 RTI_DATA2_MEM_RX_UFC_D(0x40));
1761 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1762 RTI_DATA2_MEM_RX_UFC_D(0x80));
1763 writeq(val64, &bar0->rti_data2_mem);
/* Latch one RTI entry per Rx ring and poll until the strobe clears. */
1765 for (i = 0; i < config->rx_ring_num; i++) {
1766 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1767 | RTI_CMD_MEM_OFFSET(i);
1768 writeq(val64, &bar0->rti_command_mem);
1771 * Once the operation completes, the Strobe bit of the
1772 * command register will be reset. We poll for this
1773 * particular condition. We wait for a maximum of 500ms
1774 * for the operation to complete, if it's not complete
1775 * by then we return error.
1779 val64 = readq(&bar0->rti_command_mem);
1780 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1784 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1794 * Initializing proper values as Pause threshold into all
1795 * the 8 Queues on Rx side.
1797 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1798 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1800 /* Disable RMAC PAD STRIPPING */
1801 add = &bar0->mac_cfg;
1802 val64 = readq(&bar0->mac_cfg);
1803 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1804 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1805 writel((u32) (val64), add);
1806 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1807 writel((u32) (val64 >> 32), (add + 4));
1808 val64 = readq(&bar0->mac_cfg);
1810 /* Enable FCS stripping by adapter */
1811 add = &bar0->mac_cfg;
1812 val64 = readq(&bar0->mac_cfg);
1813 val64 |= MAC_CFG_RMAC_STRIP_FCS;
/* Xframe-II accepts a direct 64-bit write; Xframe-I needs the keyed 32-bit halves. */
1814 if (nic->device_type == XFRAME_II_DEVICE)
1815 writeq(val64, &bar0->mac_cfg);
1817 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1818 writel((u32) (val64), add);
1819 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1820 writel((u32) (val64 >> 32), (add + 4));
1824 * Set the time value to be inserted in the pause frame
1825 * generated by xena.
1827 val64 = readq(&bar0->rmac_pause_cfg);
1828 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1829 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1830 writeq(val64, &bar0->rmac_pause_cfg);
1833 * Set the Threshold Limit for Generating the pause frame
1834 * If the amount of data in any Queue exceeds ratio of
1835 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1836 * pause frame is generated
1839 for (i = 0; i < 4; i++) {
1841 (((u64) 0xFF00 | nic->mac_control.
1842 mc_pause_threshold_q0q3)
1845 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1848 for (i = 0; i < 4; i++) {
1850 (((u64) 0xFF00 | nic->mac_control.
1851 mc_pause_threshold_q4q7)
1854 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1857 * TxDMA will stop Read request if the number of read split has
1858 * exceeded the limit pointed by shared_splits
1860 val64 = readq(&bar0->pic_control);
1861 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1862 writeq(val64, &bar0->pic_control);
1864 if (nic->config.bus_speed == 266) {
1865 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1866 writeq(0x0, &bar0->read_retry_delay);
1867 writeq(0x0, &bar0->write_retry_delay);
1871 * Programming the Herc to split every write transaction
1872 * that does not start on an ADB to reduce disconnects.
1874 if (nic->device_type == XFRAME_II_DEVICE) {
1875 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1876 MISC_LINK_STABILITY_PRD(3);
1877 writeq(val64, &bar0->misc_control);
1878 val64 = readq(&bar0->pic_control2);
1879 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1880 writeq(val64, &bar0->pic_control2);
/* CX4 boards need a larger average inter-packet gap on the TMAC. */
1882 if (strstr(nic->product_name, "CX4")) {
1883 val64 = TMAC_AVG_IPG(0x17);
1884 writeq(val64, &bar0->tmac_avg_ipg);
/* Link-fault detection schemes returned by s2io_link_fault_indication(). */
1889 #define LINK_UP_DOWN_INTERRUPT 1
1890 #define MAC_RMAC_ERR_TIMER 2
1892 static int s2io_link_fault_indication(struct s2io_nic *nic)
1894 if (nic->config.intr_type != INTA)
1895 return MAC_RMAC_ERR_TIMER;
1896 if (nic->device_type == XFRAME_II_DEVICE)
1897 return LINK_UP_DOWN_INTERRUPT;
1899 return MAC_RMAC_ERR_TIMER;
1903 * do_s2io_write_bits - update alarm bits in alarm register
1904 * @value: alarm bits
1905 * @flag: interrupt status
1906 * @addr: address value
1907 * Description: update alarm bits in alarm register
1911 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1915 temp64 = readq(addr);
1917 if(flag == ENABLE_INTRS)
1918 temp64 &= ~((u64) value);
1920 temp64 |= ((u64) value);
1921 writeq(temp64, addr);
1924 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1926 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1927 register u64 gen_int_mask = 0;
/* For every block selected in 'mask', (un)mask its alarm sources via
 * do_s2io_write_bits() and accumulate the block's bit for the general
 * interrupt mask. */
1929 if (mask & TX_DMA_INTR) {
1931 gen_int_mask |= TXDMA_INT_M;
1933 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1934 TXDMA_PCC_INT | TXDMA_TTI_INT |
1935 TXDMA_LSO_INT | TXDMA_TPA_INT |
1936 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1938 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1939 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1940 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1941 &bar0->pfc_err_mask);
1943 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1944 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1945 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1947 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1948 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1949 PCC_N_SERR | PCC_6_COF_OV_ERR |
1950 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1951 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1952 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1954 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1955 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1957 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1958 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1959 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1960 flag, &bar0->lso_err_mask);
1962 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1963 flag, &bar0->tpa_err_mask);
1965 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1969 if (mask & TX_MAC_INTR) {
1970 gen_int_mask |= TXMAC_INT_M;
1971 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1972 &bar0->mac_int_mask);
1973 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1974 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1975 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1976 flag, &bar0->mac_tmac_err_mask);
1979 if (mask & TX_XGXS_INTR) {
1980 gen_int_mask |= TXXGXS_INT_M;
1981 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1982 &bar0->xgxs_int_mask);
1983 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1984 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1985 flag, &bar0->xgxs_txgxs_err_mask);
1988 if (mask & RX_DMA_INTR) {
1989 gen_int_mask |= RXDMA_INT_M;
1990 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1991 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1992 flag, &bar0->rxdma_int_mask);
1993 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1994 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1995 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1996 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1997 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1998 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1999 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
2000 &bar0->prc_pcix_err_mask);
2001 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
2002 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
2003 &bar0->rpa_err_mask);
2004 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
2005 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
2006 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
2007 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
2008 flag, &bar0->rda_err_mask);
2009 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2010 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2011 flag, &bar0->rti_err_mask);
2014 if (mask & RX_MAC_INTR) {
2015 gen_int_mask |= RXMAC_INT_M;
2016 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2017 &bar0->mac_int_mask);
2018 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2019 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2020 RMAC_DOUBLE_ECC_ERR |
2021 RMAC_LINK_STATE_CHANGE_INT,
2022 flag, &bar0->mac_rmac_err_mask);
2025 if (mask & RX_XGXS_INTR)
2027 gen_int_mask |= RXXGXS_INT_M;
2028 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2029 &bar0->xgxs_int_mask);
2030 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2031 &bar0->xgxs_rxgxs_err_mask);
2034 if (mask & MC_INTR) {
2035 gen_int_mask |= MC_INT_M;
2036 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2037 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2038 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2039 &bar0->mc_err_mask);
2041 nic->general_int_mask = gen_int_mask;
2043 /* Remove this line when alarm interrupts are enabled */
2044 nic->general_int_mask = 0;
2047 * en_dis_able_nic_intrs - Enable or Disable the interrupts
2048 * @nic: device private variable,
2049 * @mask: A mask indicating which Intr block must be modified and,
2050 * @flag: A flag indicating whether to enable or disable the Intrs.
2051 * Description: This function will either disable or enable the interrupts
2052 * depending on the flag argument. The mask argument can be used to
2053 * enable/disable any Intr block.
2054 * Return Value: NONE.
2057 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2059 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2060 register u64 temp64 = 0, intr_mask = 0;
/* Start from the alarm mask computed by en_dis_err_alarms(). */
2062 intr_mask = nic->general_int_mask;
2064 /* Top level interrupt classification */
2065 /* PIC Interrupts */
2066 if (mask & TX_PIC_INTR) {
2067 /* Enable PIC Intrs in the general intr mask register */
2068 intr_mask |= TXPIC_INT_M;
2069 if (flag == ENABLE_INTRS) {
2071 * If Hercules adapter enable GPIO otherwise
2072 * disable all PCIX, Flash, MDIO, IIC and GPIO
2073 * interrupts for now.
2076 if (s2io_link_fault_indication(nic) ==
2077 LINK_UP_DOWN_INTERRUPT ) {
2078 do_s2io_write_bits(PIC_INT_GPIO, flag,
2079 &bar0->pic_int_mask);
2080 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2081 &bar0->gpio_int_mask);
2083 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2084 } else if (flag == DISABLE_INTRS) {
2086 * Disable PIC Intrs in the general
2087 * intr mask register
2089 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2093 /* Tx traffic interrupts */
2094 if (mask & TX_TRAFFIC_INTR) {
2095 intr_mask |= TXTRAFFIC_INT_M;
2096 if (flag == ENABLE_INTRS) {
2098 * Enable all the Tx side interrupts
2099 * writing 0 Enables all 64 TX interrupt levels
2101 writeq(0x0, &bar0->tx_traffic_mask);
2102 } else if (flag == DISABLE_INTRS) {
2104 * Disable Tx Traffic Intrs in the general intr mask
2107 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2111 /* Rx traffic interrupts */
2112 if (mask & RX_TRAFFIC_INTR) {
2113 intr_mask |= RXTRAFFIC_INT_M;
2114 if (flag == ENABLE_INTRS) {
2115 /* writing 0 Enables all 8 RX interrupt levels */
2116 writeq(0x0, &bar0->rx_traffic_mask);
2117 } else if (flag == DISABLE_INTRS) {
2119 * Disable Rx Traffic Intrs in the general intr mask
2122 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
/* Finally apply the accumulated mask to the general interrupt mask
 * register: enable clears the selected bits, anything else masks all. */
2126 temp64 = readq(&bar0->general_int_mask);
2127 if (flag == ENABLE_INTRS)
2128 temp64 &= ~((u64) intr_mask);
2130 temp64 = DISABLE_ALL_INTRS;
2131 writeq(temp64, &bar0->general_int_mask);
/* Cache the hardware's view of the mask for later use. */
2133 nic->general_int_mask = readq(&bar0->general_int_mask);
2137 * verify_pcc_quiescent- Checks for PCC quiescent state
2138 * Return: 1 If PCC is quiescence
2139 * 0 If PCC is not quiescence
2141 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2144 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2145 u64 val64 = readq(&bar0->adapter_status);
2147 herc = (sp->device_type == XFRAME_II_DEVICE);
/* NOTE(review): "(!herc && rev >= 4) || herc" below simplifies to
 * "herc || rev >= 4"; older Xena revisions (rev < 4) only expose the
 * RMAC_PCC_FOUR_IDLE status — presumably tied to the SXE-008 PCC
 * workaround, confirm against the errata. */
2149 if (flag == FALSE) {
2150 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2151 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2154 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
/* flag != FALSE: adapter was enabled — report quiescent when the idle bits read as fully set. */
2158 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2160 ADAPTER_STATUS_RMAC_PCC_IDLE))
2163 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2164 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2172 * verify_xena_quiescence - Checks whether the H/W is ready
2173 * Description: Returns whether the H/W is ready to go or not. Depending
2174 * on whether adapter enable bit was written or not the comparison
2175 * differs and the calling function passes the input argument flag to
2177 * Return: 1 If xena is quiescence
2178 * 0 If Xena is not quiescence
2181 static int verify_xena_quiescence(struct s2io_nic *sp)
2184 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2185 u64 val64 = readq(&bar0->adapter_status);
2186 mode = s2io_verify_pci_mode(sp);
/* Check every readiness bit in adapter_status; log and bail on the
 * first subsystem that is not ready. */
2188 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2189 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2192 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2193 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2196 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2197 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2200 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2201 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2204 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2205 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2208 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2209 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2212 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2213 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2216 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2217 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2222 * In PCI 33 mode, the P_PLL is not used, and therefore,
2223 * the P_PLL_LOCK bit in the adapter_status register will
2226 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2227 sp->device_type == XFRAME_II_DEVICE && mode !=
2229 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2232 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2233 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2234 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2241 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2242 * @sp: Pointer to device specific structure
2244 * New procedure to clear mac address reading problems on Alpha platforms
2248 static void fix_mac_address(struct s2io_nic * sp)
2250 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Replay the canned fix_mac[] write sequence into gpio_control until the END_SIGN sentinel. */
2254 while (fix_mac[i] != END_SIGN) {
2255 writeq(fix_mac[i++], &bar0->gpio_control);
/* Final read-back; result is intentionally discarded — presumably
 * flushes the posted writes. TODO(review): confirm. */
2257 val64 = readq(&bar0->gpio_control);
2262 * start_nic - Turns the device on
2263 * @nic : device private variable.
2265 * This function actually turns the device on. Before this function is
2266 * called, all registers are configured from their reset states
2267 * and shared memory is allocated but the NIC is still quiescent. On
2268 * calling this function, the device interrupts are cleared and the NIC is
2269 * literally switched on by writing into the adapter control register.
2271 * SUCCESS on success and -1 on failure.
2274 static int start_nic(struct s2io_nic *nic)
2276 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2277 struct net_device *dev = nic->dev;
2278 register u64 val64 = 0;
2280 struct mac_info *mac_control;
2281 struct config_param *config;
2283 mac_control = &nic->mac_control;
2284 config = &nic->config;
2286 /* PRC Initialization and configuration */
/* Point each ring's PRC at its first Rx block and enable it; ring mode
 * and backoff interval depend on the configured RxD mode and device. */
2287 for (i = 0; i < config->rx_ring_num; i++) {
2288 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2289 &bar0->prc_rxd0_n[i]);
2291 val64 = readq(&bar0->prc_ctrl_n[i]);
2292 if (nic->rxd_mode == RXD_MODE_1)
2293 val64 |= PRC_CTRL_RC_ENABLED;
2295 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2296 if (nic->device_type == XFRAME_II_DEVICE)
2297 val64 |= PRC_CTRL_GROUP_READS;
2298 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2299 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2300 writeq(val64, &bar0->prc_ctrl_n[i]);
2303 if (nic->rxd_mode == RXD_MODE_3B) {
2304 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2305 val64 = readq(&bar0->rx_pa_cfg);
2306 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2307 writeq(val64, &bar0->rx_pa_cfg);
/* Honour the vlan_tag_strip=0 module parameter by turning off VLAN stripping. */
2310 if (vlan_tag_strip == 0) {
2311 val64 = readq(&bar0->rx_pa_cfg);
2312 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2313 writeq(val64, &bar0->rx_pa_cfg);
2314 vlan_strip_flag = 0;
2318 * Enabling MC-RLDRAM. After enabling the device, we timeout
2319 * for around 100ms, which is approximately the time required
2320 * for the device to be ready for operation.
2322 val64 = readq(&bar0->mc_rldram_mrs);
2323 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2324 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2325 val64 = readq(&bar0->mc_rldram_mrs);
2327 msleep(100); /* Delay by around 100 ms. */
2329 /* Enabling ECC Protection. */
2330 val64 = readq(&bar0->adapter_control);
/* NOTE(review): this CLEARS ADAPTER_ECC_EN while the comment above says
 * "enabling" — confirm the bit's polarity against s2io-regs.h. */
2331 val64 &= ~ADAPTER_ECC_EN;
2332 writeq(val64, &bar0->adapter_control);
2335 * Verify if the device is ready to be enabled, if so enable
2338 val64 = readq(&bar0->adapter_status);
2339 if (!verify_xena_quiescence(nic)) {
2340 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2341 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2342 (unsigned long long) val64);
2347 * With some switches, link might be already up at this point.
2348 * Because of this weird behavior, when we enable laser,
2349 * we may not get link. We need to handle this. We cannot
2350 * figure out which switch is misbehaving. So we are forced to
2351 * make a global change.
2354 /* Enabling Laser. */
2355 val64 = readq(&bar0->adapter_control);
2356 val64 |= ADAPTER_EOI_TX_ON;
2357 writeq(val64, &bar0->adapter_control);
2359 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2361 * Don't see link state interrupts initially on some switches,
2362 * so directly scheduling the link state task here.
2364 schedule_work(&nic->set_link_task);
2366 /* SXE-002: Initialize link and activity LED */
2367 subid = nic->pdev->subsystem_device;
2368 if (((subid & 0xFF) >= 0x07) &&
2369 (nic->device_type == XFRAME_I_DEVICE)) {
2370 val64 = readq(&bar0->gpio_control);
2371 val64 |= 0x0000800000000000ULL;
2372 writeq(val64, &bar0->gpio_control);
2373 val64 = 0x0411040400000000ULL;
/* Magic LED setup value written at raw BAR0 offset 0x2700 (no named register for it). */
2374 writeq(val64, (void __iomem *)bar0 + 0x2700);
2380 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2382 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2383 TxD *txdlp, int get_off)
2385 struct s2io_nic *nic = fifo_data->nic;
2386 struct sk_buff *skb;
/*
 * Recover the skb that owns this TxD list and undo the DMA mappings
 * made at transmit time: the optional UFO in-band control buffer,
 * the linear part of the skb, then each paged fragment.  The TxDs
 * are zeroed afterwards so the descriptor list can be reused.
 */
/* UFO in-band case: only a u64 control word was DMA-mapped for this
 * descriptor, not a frame buffer — unmap just that. */
2391 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2392 pci_unmap_single(nic->pdev, (dma_addr_t)
2393 txds->Buffer_Pointer, sizeof(u64),
/* Host_Control holds the skb pointer stashed at xmit time. */
2398 skb = (struct sk_buff *) ((unsigned long)
2399 txds->Host_Control);
2401 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
/* Unmap the linear (non-paged) portion of the skb data. */
2404 pci_unmap_single(nic->pdev, (dma_addr_t)
2405 txds->Buffer_Pointer,
2406 skb->len - skb->data_len,
2408 frg_cnt = skb_shinfo(skb)->nr_frags;
/* Unmap every paged fragment; one TxD per fragment. */
2411 for (j = 0; j < frg_cnt; j++, txds++) {
2412 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2413 if (!txds->Buffer_Pointer)
2415 pci_unmap_page(nic->pdev, (dma_addr_t)
2416 txds->Buffer_Pointer,
2417 frag->size, PCI_DMA_TODEVICE);
/* Clear the whole TxD list for reuse before returning the skb. */
2420 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2425 * free_tx_buffers - Free all queued Tx buffers
2426 * @nic : device private variable.
2428 * Free all queued Tx buffers.
2429 * Return Value: void
2432 static void free_tx_buffers(struct s2io_nic *nic)
2434 struct net_device *dev = nic->dev;
2435 struct sk_buff *skb;
2438 struct mac_info *mac_control;
2439 struct config_param *config;
2442 mac_control = &nic->mac_control;
2443 config = &nic->config;
/* Walk every Tx FIFO under its tx_lock: unmap and free the skb of
 * each descriptor list, then rewind the get/put offsets so the FIFO
 * is back to its empty state. */
2445 for (i = 0; i < config->tx_fifo_num; i++) {
2446 unsigned long flags;
2447 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2448 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2449 txdp = (struct TxD *) \
2450 mac_control->fifos[i].list_info[j].list_virt_addr;
2451 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2453 nic->mac_control.stats_info->sw_stat.mem_freed
2460 "%s:forcibly freeing %d skbs on FIFO%d\n",
2462 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2463 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2464 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2469 * stop_nic - To stop the nic
2470 * @nic : device private variable.
2472 * This function does exactly the opposite of what the start_nic()
2473 * function does. This function is called to stop the device.
2478 static void stop_nic(struct s2io_nic *nic)
2480 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2481 register u64 val64 = 0;
2483 struct mac_info *mac_control;
2484 struct config_param *config;
2486 mac_control = &nic->mac_control;
2487 config = &nic->config;
2489 /* Disable all interrupts */
2490 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2491 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2492 interruptible |= TX_PIC_INTR;
2493 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2495 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2496 val64 = readq(&bar0->adapter_control);
2497 val64 &= ~(ADAPTER_CNTL_EN);
2498 writeq(val64, &bar0->adapter_control);
2502 * fill_rx_buffers - Allocates the Rx side skbs
2503 * @ring_info: per ring structure
2505 * The function allocates Rx side skbs and puts the physical
2506 * address of these buffers into the RxD buffer pointers, so that the NIC
2507 * can DMA the received frame into these locations.
2508 * The NIC supports 3 receive modes, viz
2510 * 2. three buffer and
2511 * 3. Five buffer modes.
2512 * Each mode defines how many fragments the received frame will be split
2513 * up into by the NIC. The frame is split into L3 header, L4 Header,
2514 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2515 * is split into 3 fragments. As of now only single buffer mode is
2518 * SUCCESS on success or an appropriate -ve value on failure.
2521 static int fill_rx_buffers(struct ring_info *ring)
2523 struct sk_buff *skb;
2525 int off, size, block_no, block_no1;
2530 struct RxD_t *first_rxdp = NULL;
2531 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2535 struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
/* Number of descriptors that need fresh buffers right now. */
2537 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2539 block_no1 = ring->rx_curr_get_info.block_index;
2540 while (alloc_tab < alloc_cnt) {
2541 block_no = ring->rx_curr_put_info.block_index;
2543 off = ring->rx_curr_put_info.offset;
2545 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2547 rxd_index = off + 1;
2549 rxd_index += (block_no * ring->rxd_count);
/* Put pointer caught up with get pointer on a still-owned RxD:
 * the ring is full, stop refilling. */
2551 if ((block_no == block_no1) &&
2552 (off == ring->rx_curr_get_info.offset) &&
2553 (rxdp->Host_Control)) {
2554 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2556 DBG_PRINT(INTR_DBG, " info equated\n");
/* End of the current block reached: advance (and wrap) the put
 * block index and restart at offset 0 of the next block. */
2559 if (off && (off == ring->rxd_count)) {
2560 ring->rx_curr_put_info.block_index++;
2561 if (ring->rx_curr_put_info.block_index ==
2563 ring->rx_curr_put_info.block_index = 0;
2564 block_no = ring->rx_curr_put_info.block_index;
2566 ring->rx_curr_put_info.offset = off;
2567 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2568 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2569 ring->dev->name, rxdp);
/* Descriptor still owned by the adapter — nothing to refill here. */
2573 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2574 ((ring->rxd_mode == RXD_MODE_3B) &&
2575 (rxdp->Control_2 & s2BIT(0)))) {
2576 ring->rx_curr_put_info.offset = off;
2579 /* calculate size of skb based on ring mode */
2580 size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2581 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2582 if (ring->rxd_mode == RXD_MODE_1)
2583 size += NET_IP_ALIGN;
2585 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2588 skb = dev_alloc_skb(size);
2590 DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
2591 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
/* On allocation failure, still hand the first refilled RxD to the
 * adapter so the work done so far is not lost. */
2594 first_rxdp->Control_1 |= RXD_OWN_XENA;
2596 stats->mem_alloc_fail_cnt++;
2600 stats->mem_allocated += skb->truesize;
2602 if (ring->rxd_mode == RXD_MODE_1) {
2603 /* 1 buffer mode - normal operation mode */
2604 rxdp1 = (struct RxD1*)rxdp;
2605 memset(rxdp, 0, sizeof(struct RxD1));
2606 skb_reserve(skb, NET_IP_ALIGN);
2607 rxdp1->Buffer0_ptr = pci_map_single
2608 (ring->pdev, skb->data, size - NET_IP_ALIGN,
2609 PCI_DMA_FROMDEVICE);
2610 if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
2611 goto pci_map_failed;
2614 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2615 rxdp->Host_Control = (unsigned long) (skb);
2616 } else if (ring->rxd_mode == RXD_MODE_3B) {
2619 * 2 buffer mode provides 128
2620 * byte aligned receive buffers.
2623 rxdp3 = (struct RxD3*)rxdp;
2624 /* save buffer pointers to avoid frequent dma mapping */
2625 Buffer0_ptr = rxdp3->Buffer0_ptr;
2626 Buffer1_ptr = rxdp3->Buffer1_ptr;
2627 memset(rxdp, 0, sizeof(struct RxD3));
2628 /* restore the buffer pointers for dma sync*/
2629 rxdp3->Buffer0_ptr = Buffer0_ptr;
2630 rxdp3->Buffer1_ptr = Buffer1_ptr;
2632 ba = &ring->ba[block_no][off];
2633 skb_reserve(skb, BUF0_LEN);
2634 tmp = (u64)(unsigned long) skb->data;
2637 skb->data = (void *) (unsigned long)tmp;
2638 skb_reset_tail_pointer(skb);
2640 /* AK: check is wrong. 0 can be valid dma address */
2641 if (!(rxdp3->Buffer0_ptr))
2642 rxdp3->Buffer0_ptr =
2643 pci_map_single(ring->pdev, ba->ba_0,
2644 BUF0_LEN, PCI_DMA_FROMDEVICE);
2646 pci_dma_sync_single_for_device(ring->pdev,
2647 (dma_addr_t) rxdp3->Buffer0_ptr,
2648 BUF0_LEN, PCI_DMA_FROMDEVICE);
2649 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
2650 goto pci_map_failed;
2652 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2653 if (ring->rxd_mode == RXD_MODE_3B) {
2654 /* Two buffer mode */
2657 * Buffer2 will have L3/L4 header plus
2660 rxdp3->Buffer2_ptr = pci_map_single
2661 (ring->pdev, skb->data, ring->mtu + 4,
2662 PCI_DMA_FROMDEVICE);
2664 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
2665 goto pci_map_failed;
2667 /* AK: check is wrong */
2668 if (!rxdp3->Buffer1_ptr)
2669 rxdp3->Buffer1_ptr =
2670 pci_map_single(ring->pdev,
2672 PCI_DMA_FROMDEVICE);
/* Buffer1 mapping failed: undo the Buffer2 mapping made just
 * above before bailing out. */
2674 if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
2677 (dma_addr_t)(unsigned long)
2680 PCI_DMA_FROMDEVICE);
2681 goto pci_map_failed;
2683 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2684 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2687 rxdp->Control_2 |= s2BIT(0);
2688 rxdp->Host_Control = (unsigned long) (skb);
/* Ownership of intermediate RxDs is flipped immediately; the first
 * RxD of each rxsync_frequency batch is held back and flipped last
 * (after a memory barrier) so the adapter sees a consistent batch. */
2690 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2691 rxdp->Control_1 |= RXD_OWN_XENA;
2693 if (off == (ring->rxd_count + 1))
2695 ring->rx_curr_put_info.offset = off;
2697 rxdp->Control_2 |= SET_RXD_MARKER;
2698 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2701 first_rxdp->Control_1 |= RXD_OWN_XENA;
2705 ring->rx_bufs_left += 1;
2710 /* Transfer ownership of first descriptor to adapter just before
2711 * exiting. Before that, use memory barrier so that ownership
2712 * and other fields are seen by adapter correctly.
2716 first_rxdp->Control_1 |= RXD_OWN_XENA;
/* pci_map_failed path: account the failure and free the skb whose
 * buffer could not be mapped. */
2721 stats->pci_map_fail_cnt++;
2722 stats->mem_freed += skb->truesize;
2723 dev_kfree_skb_irq(skb);
/*
 * free_rxd_blk - Free all Rx buffers of one receive block.
 * @sp: device private structure.
 * @ring_no: index of the Rx ring.
 * @blk: index of the block within the ring.
 *
 * Unmaps the DMA buffer(s) of every RxD in the block according to the
 * ring's RxD mode, frees the owning skbs, zeroes the descriptors and
 * updates the ring's rx_bufs_left accounting.
 */
2727 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2729 struct net_device *dev = sp->dev;
2731 struct sk_buff *skb;
2733 struct mac_info *mac_control;
2738 mac_control = &sp->mac_control;
2739 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2740 rxdp = mac_control->rings[ring_no].
2741 rx_blocks[blk].rxds[j].virt_addr;
2742 skb = (struct sk_buff *)
2743 ((unsigned long) rxdp->Host_Control);
/* One mapping in 1-buffer mode... */
2747 if (sp->rxd_mode == RXD_MODE_1) {
2748 rxdp1 = (struct RxD1*)rxdp;
2749 pci_unmap_single(sp->pdev, (dma_addr_t)
2752 HEADER_ETHERNET_II_802_3_SIZE
2753 + HEADER_802_2_SIZE +
2755 PCI_DMA_FROMDEVICE);
2756 memset(rxdp, 0, sizeof(struct RxD1));
/* ...three separate mappings (Buffer0/1/2) in 2-buffer mode. */
2757 } else if(sp->rxd_mode == RXD_MODE_3B) {
2758 rxdp3 = (struct RxD3*)rxdp;
2759 ba = &mac_control->rings[ring_no].
2761 pci_unmap_single(sp->pdev, (dma_addr_t)
2764 PCI_DMA_FROMDEVICE);
2765 pci_unmap_single(sp->pdev, (dma_addr_t)
2768 PCI_DMA_FROMDEVICE);
2769 pci_unmap_single(sp->pdev, (dma_addr_t)
2772 PCI_DMA_FROMDEVICE);
2773 memset(rxdp, 0, sizeof(struct RxD3));
2775 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2777 mac_control->rings[ring_no].rx_bufs_left -= 1;
2782 * free_rx_buffers - Frees all Rx buffers
2783 * @sp: device private variable.
2785 * This function will free all Rx buffers allocated by host.
2790 static void free_rx_buffers(struct s2io_nic *sp)
2792 struct net_device *dev = sp->dev;
2793 int i, blk = 0, buf_cnt = 0;
2794 struct mac_info *mac_control;
2795 struct config_param *config;
2797 mac_control = &sp->mac_control;
2798 config = &sp->config;
/* Free every block of every ring, then reset the per-ring get/put
 * bookkeeping so the rings read as empty. */
2800 for (i = 0; i < config->rx_ring_num; i++) {
2801 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2802 free_rxd_blk(sp,i,blk);
2804 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2805 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2806 mac_control->rings[i].rx_curr_put_info.offset = 0;
2807 mac_control->rings[i].rx_curr_get_info.offset = 0;
2808 mac_control->rings[i].rx_bufs_left = 0;
2809 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2810 dev->name, buf_cnt, i);
/*
 * s2io_chk_rx_buffers - Replenish the Rx buffers of a ring.
 * @ring: per-ring structure to refill.
 *
 * Thin wrapper around fill_rx_buffers() that only logs an
 * out-of-memory condition instead of propagating it.
 */
2814 static int s2io_chk_rx_buffers(struct ring_info *ring)
2816 if (fill_rx_buffers(ring) == -ENOMEM) {
2817 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2818 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2824 * s2io_poll - Rx interrupt handler for NAPI support
2825 * @napi : pointer to the napi structure.
2826 * @budget : The number of packets that were budgeted to be processed
2827 * during one pass through the 'Poll' function.
2829 * Comes into picture only if NAPI support has been incorporated. It does
2830 * the same thing that rx_intr_handler does, but not in an interrupt context
2831 * also It will process only a given number of packets.
2833 * 0 on success and 1 if there are No Rx packets to be processed.
2836 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2838 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2839 struct net_device *dev = ring->dev;
2840 struct config_param *config;
2841 struct mac_info *mac_control;
2842 int pkts_processed = 0;
2843 u8 __iomem *addr = NULL;
2845 struct s2io_nic *nic = dev->priv;
2846 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2847 int budget_org = budget;
2849 config = &nic->config;
2850 mac_control = &nic->mac_control;
2852 if (unlikely(!is_s2io_card_up(nic)))
/* Service this ring within budget, then top its buffers back up. */
2855 pkts_processed = rx_intr_handler(ring, budget);
2856 s2io_chk_rx_buffers(ring);
/* Budget not exhausted: all work done, leave NAPI polling and
 * unmask this ring's MSI-X Rx vector again. */
2858 if (pkts_processed < budget_org) {
2859 netif_rx_complete(dev, napi);
2860 /*Re Enable MSI-Rx Vector*/
2861 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
/* Byte offset into xmsi_mask_reg selecting this ring's mask
 * byte; presumably matches the MSI-X vector layout — verify
 * against the Xframe register map. */
2862 addr += 7 - ring->ring_no;
2863 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2867 return pkts_processed;
/*
 * s2io_poll_inta - NAPI poll handler for legacy (INTA) interrupt mode.
 * @napi: device-wide napi structure embedded in s2io_nic.
 * @budget: maximum number of packets to process in this pass.
 *
 * Unlike s2io_poll_msix() (one napi context per ring), INTA mode has a
 * single napi context, so every Rx ring is serviced here, sharing the
 * overall budget.  Returns the number of packets processed.
 */
2869 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2871 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2872 struct ring_info *ring;
2873 struct net_device *dev = nic->dev;
2874 struct config_param *config;
2875 struct mac_info *mac_control;
2876 int pkts_processed = 0;
2877 int ring_pkts_processed, i;
2878 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2879 int budget_org = budget;
2881 config = &nic->config;
2882 mac_control = &nic->mac_control;
2884 if (unlikely(!is_s2io_card_up(nic)))
/* Service each ring in turn, refilling its buffers and deducting
 * its packets from the remaining budget. */
2887 for (i = 0; i < config->rx_ring_num; i++) {
2888 ring = &mac_control->rings[i];
2889 ring_pkts_processed = rx_intr_handler(ring, budget);
2890 s2io_chk_rx_buffers(ring);
2891 pkts_processed += ring_pkts_processed;
2892 budget -= ring_pkts_processed;
/* All work done within budget: leave polling and unmask Rx
 * traffic interrupts (readl flushes the posted write). */
2896 if (pkts_processed < budget_org) {
2897 netif_rx_complete(dev, napi);
2898 /* Re enable the Rx interrupts for the ring */
2899 writeq(0, &bar0->rx_traffic_mask);
2900 readl(&bar0->rx_traffic_mask);
2902 return pkts_processed;
2905 #ifdef CONFIG_NET_POLL_CONTROLLER
2907 * s2io_netpoll - netpoll event handler entry point
2908 * @dev : pointer to the device structure.
2910 * This function will be called by upper layer to check for events on the
2911 * interface in situations where interrupts are disabled. It is used for
2912 * specific in-kernel networking tasks, such as remote consoles and kernel
2913 * debugging over the network (example netdump in RedHat).
2915 static void s2io_netpoll(struct net_device *dev)
2917 struct s2io_nic *nic = dev->priv;
2918 struct mac_info *mac_control;
2919 struct config_param *config;
2920 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2921 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2924 if (pci_channel_offline(nic->pdev))
/* Run with the device IRQ disabled; we poll everything by hand. */
2927 disable_irq(dev->irq);
2929 mac_control = &nic->mac_control;
2930 config = &nic->config;
/* Acknowledge any pending Rx/Tx traffic interrupts (write-1-to-clear,
 * presumably — confirm against the Xframe register spec). */
2932 writeq(val64, &bar0->rx_traffic_int);
2933 writeq(val64, &bar0->tx_traffic_int);
2935 /* we need to free up the transmitted skbufs or else netpoll will
2936 * run out of skbs and will fail and eventually netpoll application such
2937 * as netdump will fail.
2939 for (i = 0; i < config->tx_fifo_num; i++)
2940 tx_intr_handler(&mac_control->fifos[i]);
2942 /* check for received packet and indicate up to network */
2943 for (i = 0; i < config->rx_ring_num; i++)
2944 rx_intr_handler(&mac_control->rings[i], 0);
/* Replenish Rx buffers consumed by the poll above. */
2946 for (i = 0; i < config->rx_ring_num; i++) {
2947 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2948 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2949 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2953 enable_irq(dev->irq);
2959 * rx_intr_handler - Rx interrupt handler
2960 * @ring_info: per ring structure.
2961 * @budget: budget for napi processing.
2963 * If the interrupt is because of a received frame or if the
2964 * receive ring contains fresh as yet un-processed frames, this function is
2965 * called. It picks out the RxD at which place the last Rx processing had
2966 * stopped and sends the skb to the OSM's Rx handler and then increments
2969 * No. of napi packets processed.
2971 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2973 int get_block, put_block;
2974 struct rx_curr_get_info get_info, put_info;
2976 struct sk_buff *skb;
2977 int pkt_cnt = 0, napi_pkts = 0;
/* Snapshot the get/put positions; processing resumes where the last
 * invocation left off. */
2982 get_info = ring_data->rx_curr_get_info;
2983 get_block = get_info.block_index;
2984 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2985 put_block = put_info.block_index;
2986 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
/* Consume descriptors while the adapter has handed them back. */
2988 while (RXD_IS_UP2DT(rxdp)) {
2990 * If you are next to put index then it's
2991 * FIFO full condition
2993 if ((get_block == put_block) &&
2994 (get_info.offset + 1) == put_info.offset) {
2995 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2996 ring_data->dev->name);
2999 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
3001 DBG_PRINT(ERR_DBG, "%s: The skb is ",
3002 ring_data->dev->name);
3003 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
/* Unmap the received frame before handing it up the stack;
 * the mapping layout depends on the RxD mode. */
3006 if (ring_data->rxd_mode == RXD_MODE_1) {
3007 rxdp1 = (struct RxD1*)rxdp;
3008 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3011 HEADER_ETHERNET_II_802_3_SIZE +
3014 PCI_DMA_FROMDEVICE);
3015 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
3016 rxdp3 = (struct RxD3*)rxdp;
3017 pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
3019 BUF0_LEN, PCI_DMA_FROMDEVICE);
3020 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3023 PCI_DMA_FROMDEVICE);
3025 prefetch(skb->data);
3026 rx_osm_handler(ring_data, rxdp);
3028 ring_data->rx_curr_get_info.offset = get_info.offset;
3029 rxdp = ring_data->rx_blocks[get_block].
3030 rxds[get_info.offset].virt_addr;
/* End of block: wrap offset to 0 and advance (wrapping) the
 * get block index. */
3031 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3032 get_info.offset = 0;
3033 ring_data->rx_curr_get_info.offset = get_info.offset;
3035 if (get_block == ring_data->block_count)
3037 ring_data->rx_curr_get_info.block_index = get_block;
3038 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
/* NAPI path: stop once the caller's budget is consumed. */
3041 if (ring_data->nic->config.napi) {
3048 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
/* Flush any in-progress LRO sessions so no aggregated frame is
 * left stranded when we return. */
3051 if (ring_data->lro) {
3052 /* Clear all LRO sessions before exiting */
3053 for (i=0; i<MAX_LRO_SESSIONS; i++) {
3054 struct lro *lro = &ring_data->lro0_n[i];
3056 update_L3L4_header(ring_data->nic, lro);
3057 queue_rx_frame(lro->parent, lro->vlan_tag);
3058 clear_lro_session(lro);
3066 * tx_intr_handler - Transmit interrupt handler
3067 * @fifo_data : per-FIFO data structure being serviced.
3069 * If an interrupt was raised to indicate DMA complete of the
3070 * Tx packet, this function is called. It identifies the last TxD
3071 * whose buffer was freed and frees all skbs whose data have already
3072 * DMA'ed into the NICs internal memory.
3077 static void tx_intr_handler(struct fifo_info *fifo_data)
3079 struct s2io_nic *nic = fifo_data->nic;
3080 struct tx_curr_get_info get_info, put_info;
3081 struct sk_buff *skb = NULL;
3084 unsigned long flags = 0;
/* trylock: if another context already holds tx_lock, skip this pass
 * rather than spin in interrupt context. */
3087 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3090 get_info = fifo_data->tx_curr_get_info;
3091 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3092 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
/* Reap completed descriptors: owned by host, not caught up with the
 * put pointer, and carrying a stashed skb. */
3094 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3095 (get_info.offset != put_info.offset) &&
3096 (txdlp->Host_Control)) {
3097 /* Check for TxD errors */
3098 if (txdlp->Control_1 & TXD_T_CODE) {
3099 unsigned long long err;
3100 err = txdlp->Control_1 & TXD_T_CODE;
3102 nic->mac_control.stats_info->sw_stat.
3106 /* update t_code statistics */
3107 err_mask = err >> 48;
3110 nic->mac_control.stats_info->sw_stat.
3115 nic->mac_control.stats_info->sw_stat.
3116 tx_desc_abort_cnt++;
3120 nic->mac_control.stats_info->sw_stat.
3121 tx_parity_err_cnt++;
3125 nic->mac_control.stats_info->sw_stat.
3130 nic->mac_control.stats_info->sw_stat.
3131 tx_list_proc_err_cnt++;
/* Unmap the descriptor list's buffers and get back the skb. */
3136 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3138 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3139 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3141 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3146 /* Updating the statistics block */
3147 nic->stats.tx_bytes += skb->len;
3148 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3149 dev_kfree_skb_irq(skb);
/* Advance (and wrap) the get offset to the next descriptor list. */
3152 if (get_info.offset == get_info.fifo_len + 1)
3153 get_info.offset = 0;
3154 txdlp = (struct TxD *) fifo_data->list_info
3155 [get_info.offset].list_virt_addr;
3156 fifo_data->tx_curr_get_info.offset =
/* Descriptors were freed, so the queue may be restartable. */
3160 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3162 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3166 * s2io_mdio_write - Function to write in to MDIO registers
3167 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3168 * @addr : address value
3169 * @value : data value
3170 * @dev : pointer to net_device structure
3172 * This function is used to write values to the MDIO registers
3175 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3178 struct s2io_nic *sp = dev->priv;
3179 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Address transaction: latch the MMD register address (Clause 45
 * style address cycle), then kick it off with START_TRANS. */
3181 //address transaction
3182 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3183 | MDIO_MMD_DEV_ADDR(mmd_type)
3184 | MDIO_MMS_PRT_ADDR(0x0)
3185 writeq(val64, &bar0->mdio_control);
3186 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3187 writeq(val64, &bar0->mdio_control);
/* Data transaction: write the 16-bit value to the latched address. */
3192 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3193 | MDIO_MMD_DEV_ADDR(mmd_type)
3194 | MDIO_MMS_PRT_ADDR(0x0)
3195 | MDIO_MDIO_DATA(value)
3196 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3197 writeq(val64, &bar0->mdio_control);
3198 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3199 writeq(val64, &bar0->mdio_control);
/* Read-back transaction — presumably to complete/verify the write
 * cycle; confirm against the Xframe MDIO interface spec. */
3203 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3204 | MDIO_MMD_DEV_ADDR(mmd_type)
3205 | MDIO_MMS_PRT_ADDR(0x0)
3206 | MDIO_OP(MDIO_OP_READ_TRANS);
3207 writeq(val64, &bar0->mdio_control);
3208 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3209 writeq(val64, &bar0->mdio_control);
3215 * s2io_mdio_read - Function to read from MDIO registers
3216 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3217 * @addr : address value
3218 * @dev : pointer to net_device structure
3220 * This function is used to read values from the MDIO registers
3223 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3227 struct s2io_nic *sp = dev->priv;
3228 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3230 /* address transaction */
3231 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3232 | MDIO_MMD_DEV_ADDR(mmd_type)
3233 | MDIO_MMS_PRT_ADDR(0x0)
3234 writeq(val64, &bar0->mdio_control);
3235 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3236 writeq(val64, &bar0->mdio_control);
3239 /* Data transaction */
3241 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3242 | MDIO_MMD_DEV_ADDR(mmd_type)
3243 | MDIO_MMS_PRT_ADDR(0x0)
3244 | MDIO_OP(MDIO_OP_READ_TRANS);
3245 writeq(val64, &bar0->mdio_control);
3246 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3247 writeq(val64, &bar0->mdio_control);
3250 /* Read the value from regs */
/* The 16-bit MDIO data lives in bits 31:16 of mdio_control. */
3251 rval64 = readq(&bar0->mdio_control);
3252 rval64 = rval64 & 0xFFFF0000;
3253 rval64 = rval64 >> 16;
3257 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3258 * @counter : counter value to be updated
3259 * @flag : flag to indicate the status
3260 * @type : counter type
* @regs_stat : packed per-alarm 2-bit state field, updated in place.
* @index : which 2-bit slot of @regs_stat belongs to this alarm.
3262 * This function is to check the status of the xpak counters value
3266 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3271 for(i = 0; i <index; i++)
/* Alarm asserted: bump the counter and inspect this alarm's 2-bit
 * state extracted from regs_stat. */
3276 *counter = *counter + 1;
3277 val64 = *regs_stat & mask;
3278 val64 = val64 >> (index * 0x2);
3285 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3286 "service. Excessive temperatures may "
3287 "result in premature transceiver "
3291 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3292 "service Excessive bias currents may "
3293 "indicate imminent laser diode "
3297 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3298 "service Excessive laser output "
3299 "power may saturate far-end "
3303 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
/* Write the updated 2-bit state back into its slot. */
3308 val64 = val64 << (index * 0x2);
3309 *regs_stat = (*regs_stat & (~mask)) | (val64);
/* Alarm not asserted: clear this alarm's state bits. */
3312 *regs_stat = *regs_stat & (~mask);
3317 * s2io_updt_xpak_counter - Function to update the xpak counters
3318 * @dev : pointer to net_device struct
3320 * This function is to update the status of the xpak counters value
3323 static void s2io_updt_xpak_counter(struct net_device *dev)
3331 struct s2io_nic *sp = dev->priv;
3332 struct stat_block *stat_info = sp->mac_control.stats_info;
3334 /* Check the communication with the MDIO slave */
3337 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* All-ones/all-zeroes reads indicate a dead MDIO link. */
3338 if((val64 == 0xFFFF) || (val64 == 0x0000))
3340 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3341 "Returned %llx\n", (unsigned long long)val64);
3345 /* Check for the expected value of 2040 at PMA address 0x0000 */
3348 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3349 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3350 (unsigned long long)val64);
3354 /* Loading the DOM register to MDIO register */
3356 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3357 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3359 /* Reading the Alarm flags */
3362 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Alarm bits: 7/6 = transceiver temp high/low, 3/2 = laser bias
 * current high/low, 1/0 = laser output power high/low.  The "high"
 * alarms are tracked through s2io_chk_xpak_counter so repeated
 * occurrences can be flagged. */
3364 flag = CHECKBIT(val64, 0x7);
3366 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3367 &stat_info->xpak_stat.xpak_regs_stat,
3370 if(CHECKBIT(val64, 0x6))
3371 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3373 flag = CHECKBIT(val64, 0x3);
3375 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3376 &stat_info->xpak_stat.xpak_regs_stat,
3379 if(CHECKBIT(val64, 0x2))
3380 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3382 flag = CHECKBIT(val64, 0x1);
3384 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3385 &stat_info->xpak_stat.xpak_regs_stat,
3388 if(CHECKBIT(val64, 0x0))
3389 stat_info->xpak_stat.alarm_laser_output_power_low++;
3391 /* Reading the Warning flags */
3394 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Warning bits use the same layout as the alarm bits above. */
3396 if(CHECKBIT(val64, 0x7))
3397 stat_info->xpak_stat.warn_transceiver_temp_high++;
3399 if(CHECKBIT(val64, 0x6))
3400 stat_info->xpak_stat.warn_transceiver_temp_low++;
3402 if(CHECKBIT(val64, 0x3))
3403 stat_info->xpak_stat.warn_laser_bias_current_high++;
3405 if(CHECKBIT(val64, 0x2))
3406 stat_info->xpak_stat.warn_laser_bias_current_low++;
3408 if(CHECKBIT(val64, 0x1))
3409 stat_info->xpak_stat.warn_laser_output_power_high++;
3411 if(CHECKBIT(val64, 0x0))
3412 stat_info->xpak_stat.warn_laser_output_power_low++;
3416 * wait_for_cmd_complete - waits for a command to complete.
3417 * @addr : iomem address of the register to poll.
3418 * @busy_bit : bit mask within that register indicating "busy".
* @bit_state : S2IO_BIT_RESET to wait for the bit to clear, or
* S2IO_BIT_SET to wait for it to be set.
3419 * Description: Function that waits for a command to Write into RMAC
3420 * ADDR DATA registers to be completed and returns either success or
3421 * error depending on whether the command was complete or not.
3423 * SUCCESS on success and FAILURE on failure.
3426 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3429 int ret = FAILURE, cnt = 0, delay = 1;
/* Reject unknown bit_state values up front. */
3432 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3436 val64 = readq(addr);
3437 if (bit_state == S2IO_BIT_RESET) {
3438 if (!(val64 & busy_bit)) {
3443 if (!(val64 & busy_bit)) {
3460 * check_pci_device_id - Checks if the device id is supported
3462 * Description: Function to check if the pci device id is supported by driver.
3463 * Return value: Actual device id if supported else PCI_ANY_ID
3465 static u16 check_pci_device_id(u16 id)
/* Map known PCI device IDs onto the two adapter generations;
 * anything else is unsupported. */
3468 case PCI_DEVICE_ID_HERC_WIN:
3469 case PCI_DEVICE_ID_HERC_UNI:
3470 return XFRAME_II_DEVICE;
3471 case PCI_DEVICE_ID_S2IO_UNI:
3472 case PCI_DEVICE_ID_S2IO_WIN:
3473 return XFRAME_I_DEVICE;
3480 * s2io_reset - Resets the card.
3481 * @sp : private member of the device structure.
3482 * Description: Function to Reset the card. This function then also
3483 * restores the previously saved PCI configuration space registers as
3484 * the card reset also resets the configuration space.
3489 static void s2io_reset(struct s2io_nic * sp)
3491 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3496 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3497 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3499 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3500 __FUNCTION__, sp->dev->name);
3502 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3503 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3505 val64 = SW_RESET_ALL;
3506 writeq(val64, &bar0->sw_reset);
3507 if (strstr(sp->product_name, "CX4")) {
/* Retry restoring PCI config space until the device ID reads back
 * as a supported part, i.e. the device has come out of reset. */
3511 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3513 /* Restore the PCI state saved during initialization. */
3514 pci_restore_state(sp->pdev);
3515 pci_read_config_word(sp->pdev, 0x2, &val16);
3516 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3521 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3522 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3525 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3529 /* Set swapper to enable I/O register access */
3530 s2io_set_swapper(sp);
3532 /* restore mac_addr entries */
3533 do_s2io_restore_unicast_mc(sp);
3535 /* Restore the MSIX table entries from local variables */
3536 restore_xmsi_data(sp);
3538 /* Clear certain PCI/PCI-X fields after reset */
3539 if (sp->device_type == XFRAME_II_DEVICE) {
3540 /* Clear "detected parity error" bit */
3541 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3543 /* Clearing PCIX Ecc status register */
3544 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3546 /* Clearing PCI_STATUS error reflected here */
3547 writeq(s2BIT(62), &bar0->txpic_int_reg);
3550 /* Reset device statistics maintained by OS */
3551 memset(&sp->stats, 0, sizeof (struct net_device_stats))
/* Preserve the cumulative link/reset/memory/watchdog counters across
 * the stat-block wipe below: save, zero, restore. */
3553 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3554 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3555 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3556 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3557 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3558 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3559 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3560 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3561 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3562 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3563 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3564 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3565 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3566 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3567 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3568 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3569 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3570 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3571 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3573 /* SXE-002: Configure link and activity LED to turn it off */
3574 subid = sp->pdev->subsystem_device;
3575 if (((subid & 0xFF) >= 0x07) &&
3576 (sp->device_type == XFRAME_I_DEVICE)) {
3577 val64 = readq(&bar0->gpio_control);
3578 val64 |= 0x0000800000000000ULL;
3579 writeq(val64, &bar0->gpio_control);
3580 val64 = 0x0411040400000000ULL;
3581 writeq(val64, (void __iomem *)bar0 + 0x2700);
3585 * Clear spurious ECC interrupts that would have occurred on
3586 * XFRAME II cards after reset.
3588 if (sp->device_type == XFRAME_II_DEVICE) {
3589 val64 = readq(&bar0->pcc_err_reg);
3590 writeq(val64, &bar0->pcc_err_reg);
3593 sp->device_enabled_once = FALSE;
3597 * s2io_set_swapper - to set the swapper control on the card
3598 * @sp : private member of the device structure,
3599 * pointer to the s2io_nic structure.
3600 * Description: Function to set the swapper control on the card
3601 * correctly depending on the 'endianness' of the system.
3603 * SUCCESS on success and FAILURE on failure.
3606 static int s2io_set_swapper(struct s2io_nic * sp)
3608 struct net_device *dev = sp->dev;
3609 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3610 u64 val64, valt, valr;
3613 * Set proper endian settings and verify the same by reading
3614 * the PIF Feed-back register.
3617 val64 = readq(&bar0->pif_rd_swapper_fb);
3618 if (val64 != 0x0123456789ABCDEFULL) {
3620 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3621 0x8100008181000081ULL, /* FE=1, SE=0 */
3622 0x4200004242000042ULL, /* FE=0, SE=1 */
3623 0}; /* FE=0, SE=0 */
3626 writeq(value[i], &bar0->swapper_ctrl);
3627 val64 = readq(&bar0->pif_rd_swapper_fb);
3628 if (val64 == 0x0123456789ABCDEFULL)
3633 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3635 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3636 (unsigned long long) val64);
3641 valr = readq(&bar0->swapper_ctrl);
3644 valt = 0x0123456789ABCDEFULL;
3645 writeq(valt, &bar0->xmsi_address);
3646 val64 = readq(&bar0->xmsi_address);
3650 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3651 0x0081810000818100ULL, /* FE=1, SE=0 */
3652 0x0042420000424200ULL, /* FE=0, SE=1 */
3653 0}; /* FE=0, SE=0 */
3656 writeq((value[i] | valr), &bar0->swapper_ctrl);
3657 writeq(valt, &bar0->xmsi_address);
3658 val64 = readq(&bar0->xmsi_address);
3664 unsigned long long x = val64;
3665 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3666 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3670 val64 = readq(&bar0->swapper_ctrl);
3671 val64 &= 0xFFFF000000000000ULL;
3675 * The device by default set to a big endian format, so a
3676 * big endian driver need not set anything.
3678 val64 |= (SWAPPER_CTRL_TXP_FE |
3679 SWAPPER_CTRL_TXP_SE |
3680 SWAPPER_CTRL_TXD_R_FE |
3681 SWAPPER_CTRL_TXD_W_FE |
3682 SWAPPER_CTRL_TXF_R_FE |
3683 SWAPPER_CTRL_RXD_R_FE |
3684 SWAPPER_CTRL_RXD_W_FE |
3685 SWAPPER_CTRL_RXF_W_FE |
3686 SWAPPER_CTRL_XMSI_FE |
3687 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3688 if (sp->config.intr_type == INTA)
3689 val64 |= SWAPPER_CTRL_XMSI_SE;
3690 writeq(val64, &bar0->swapper_ctrl);
3693 * Initially we enable all bits to make it accessible by the
3694 * driver, then we selectively enable only those bits that
3697 val64 |= (SWAPPER_CTRL_TXP_FE |
3698 SWAPPER_CTRL_TXP_SE |
3699 SWAPPER_CTRL_TXD_R_FE |
3700 SWAPPER_CTRL_TXD_R_SE |
3701 SWAPPER_CTRL_TXD_W_FE |
3702 SWAPPER_CTRL_TXD_W_SE |
3703 SWAPPER_CTRL_TXF_R_FE |
3704 SWAPPER_CTRL_RXD_R_FE |
3705 SWAPPER_CTRL_RXD_R_SE |
3706 SWAPPER_CTRL_RXD_W_FE |
3707 SWAPPER_CTRL_RXD_W_SE |
3708 SWAPPER_CTRL_RXF_W_FE |
3709 SWAPPER_CTRL_XMSI_FE |
3710 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3711 if (sp->config.intr_type == INTA)
3712 val64 |= SWAPPER_CTRL_XMSI_SE;
3713 writeq(val64, &bar0->swapper_ctrl);
3715 val64 = readq(&bar0->swapper_ctrl);
3718 * Verifying if endian settings are accurate by reading a
3719 * feedback register.
3721 val64 = readq(&bar0->pif_rd_swapper_fb);
3722 if (val64 != 0x0123456789ABCDEFULL) {
3723 /* Endian settings are incorrect, calls for another dekko. */
3724 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3726 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3727 (unsigned long long) val64);
/*
 * wait_for_msix_trans - poll until an XMSI access cycle completes.
 * @nic: device private structure
 * @i: XMSI index, used only in the failure message
 * Polls bit 15 of the xmsi_access register (busy flag); ret starts at 0.
 * The retry loop and failure return are not visible in this excerpt —
 * presumably ret is set non-zero on timeout; confirm against full source.
 */
3734 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3736 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3738 int ret = 0, cnt = 0;
3741 val64 = readq(&bar0->xmsi_access);
3742 if (!(val64 & s2BIT(15)))
3748 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
/*
 * restore_xmsi_data - rewrite saved MSI-X address/data pairs to the NIC.
 * @nic: device private structure
 * No-op on Xframe I (MSI-X is a Xframe II feature). For each requested
 * vector, the saved addr/data are written back and an access cycle is
 * started via xmsi_access; index 0 is the alarm vector, the others map
 * to hardware slot ((i-1)*8 + 1).
 */
3755 static void restore_xmsi_data(struct s2io_nic *nic)
3757 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3762 if (nic->device_type == XFRAME_I_DEVICE)
3765 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3766 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3767 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3768 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
/* s2BIT(7) selects the write direction; s2BIT(15) kicks off the cycle. */
3769 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3770 writeq(val64, &bar0->xmsi_access);
3771 if (wait_for_msix_trans(nic, msix_index)) {
3772 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
/*
 * store_xmsi_data - read back and cache the NIC's MSI-X address/data pairs.
 * @nic: device private structure
 * Counterpart of restore_xmsi_data(): no-op on Xframe I; for each vector,
 * starts a read access cycle (no s2BIT(7), i.e. read direction), waits for
 * completion and caches addr/data in nic->msix_info[] for later restore.
 */
3778 static void store_xmsi_data(struct s2io_nic *nic)
3780 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3781 u64 val64, addr, data;
3784 if (nic->device_type == XFRAME_I_DEVICE)
3787 /* Store and display */
3788 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3789 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3790 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3791 writeq(val64, &bar0->xmsi_access);
3792 if (wait_for_msix_trans(nic, msix_index)) {
3793 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3796 addr = readq(&bar0->xmsi_address);
3797 data = readq(&bar0->xmsi_data);
3799 nic->msix_info[i].addr = addr;
3800 nic->msix_info[i].data = data;
/*
 * s2io_enable_msi_x - allocate MSI-X vector tables and enable MSI-X.
 * @nic: device private structure
 * Allocates the pci msix_entry array and the driver-side s2io_msix_entry
 * array, wires entry 0 to the alarm/fifo handler and entries 1..n-1 to
 * the rx rings (via the rx_mat steering register), then calls
 * pci_enable_msix(). On any failure both arrays are freed and the
 * mem_freed counters updated. Return statements fall outside this excerpt.
 */
3805 static int s2io_enable_msi_x(struct s2io_nic *nic)
3807 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3809 u16 msi_control; /* Temp variable */
3810 int ret, i, j, msix_indx = 1;
3812 nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3814 if (!nic->entries) {
3815 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3817 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3820 nic->mac_control.stats_info->sw_stat.mem_allocated
3821 += (nic->num_entries * sizeof(struct msix_entry));
3823 memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3826 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3828 if (!nic->s2io_entries) {
3829 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3831 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
/* Second allocation failed: release the first and account for it. */
3832 kfree(nic->entries);
3833 nic->mac_control.stats_info->sw_stat.mem_freed
3834 += (nic->num_entries * sizeof(struct msix_entry));
3837 nic->mac_control.stats_info->sw_stat.mem_allocated
3838 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3839 memset(nic->s2io_entries, 0,
3840 nic->num_entries * sizeof(struct s2io_msix_entry));
/* Entry 0 is the alarm vector, serviced together with the Tx FIFOs. */
3842 nic->entries[0].entry = 0;
3843 nic->s2io_entries[0].entry = 0;
3844 nic->s2io_entries[0].in_use = MSIX_FLG;
3845 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3846 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
/* Remaining entries use hardware slot ((i-1)*8 + 1); filled in below. */
3848 for (i = 1; i < nic->num_entries; i++) {
3849 nic->entries[i].entry = ((i - 1) * 8) + 1;
3850 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3851 nic->s2io_entries[i].arg = NULL;
3852 nic->s2io_entries[i].in_use = 0;
/* Steer each rx ring's interrupt to its MSI-X vector via rx_mat. */
3855 rx_mat = readq(&bar0->rx_mat);
3856 for (j = 0; j < nic->config.rx_ring_num; j++) {
3857 rx_mat |= RX_MAT_SET(j, msix_indx);
3858 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3859 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3860 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3863 writeq(rx_mat, &bar0->rx_mat);
3864 readq(&bar0->rx_mat);
3866 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3867 /* We fail init if error or we get less vectors than min required */
3869 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3870 kfree(nic->entries);
3871 nic->mac_control.stats_info->sw_stat.mem_freed
3872 += (nic->num_entries * sizeof(struct msix_entry));
3873 kfree(nic->s2io_entries);
3874 nic->mac_control.stats_info->sw_stat.mem_freed
3875 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3876 nic->entries = NULL;
3877 nic->s2io_entries = NULL;
3882 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3883 * in the herc NIC. (Temp change, needs to be removed later)
3885 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3886 msi_control |= 0x1; /* Enable MSI */
3887 pci_write_config_word(nic->pdev, 0x42, msi_control);
3892 /* Handle software interrupt used during MSI(X) test */
3893 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3895 struct s2io_nic *sp = dev_id;
/* Flag that the test interrupt arrived and wake the waiter in s2io_test_msi(). */
3897 sp->msi_detected = 1;
3898 wake_up(&sp->msi_wait);
3903 /* Test interrupt path by forcing a software IRQ */
3904 static int s2io_test_msi(struct s2io_nic *sp)
3906 struct pci_dev *pdev = sp->pdev;
3907 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Register a temporary handler on MSI-X vector 1 for the self-test. */
3911 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3914 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3915 sp->dev->name, pci_name(pdev), pdev->irq);
3919 init_waitqueue_head (&sp->msi_wait);
3920 sp->msi_detected = 0;
/*
 * Program the scheduled interrupt timer to fire one shot routed to
 * MSI 1, then wait up to 100 ms for s2io_test_intr() to see it.
 */
3922 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3923 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3924 val64 |= SCHED_INT_CTRL_TIMER_EN;
3925 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3926 writeq(val64, &bar0->scheduled_int_ctrl);
3928 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3930 if (!sp->msi_detected) {
3931 /* MSI(X) test failed, go back to INTx mode */
3932 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3933 "using MSI(X) during test\n", sp->dev->name,
/* Always release the test IRQ and restore the saved timer control. */
3939 free_irq(sp->entries[1].vector, sp);
3941 writeq(saved64, &bar0->scheduled_int_ctrl);
/*
 * remove_msix_isr - tear down MSI-X: free IRQs, vector tables, disable MSI-X.
 * @sp: device private structure
 * Frees every successfully-registered vector, releases both entry arrays,
 * clears the MSI enable bit that s2io_enable_msi_x() set at config offset
 * 0x42 (herc workaround), and disables MSI-X on the PCI device.
 */
3946 static void remove_msix_isr(struct s2io_nic *sp)
3951 for (i = 0; i < sp->num_entries; i++) {
3952 if (sp->s2io_entries[i].in_use ==
3953 MSIX_REGISTERED_SUCCESS) {
3954 int vector = sp->entries[i].vector;
3955 void *arg = sp->s2io_entries[i].arg;
3956 free_irq(vector, arg);
3961 kfree(sp->s2io_entries);
3963 sp->s2io_entries = NULL;
3965 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3966 msi_control &= 0xFFFE; /* Disable MSI */
3967 pci_write_config_word(sp->pdev, 0x42, msi_control);
3969 pci_disable_msix(sp->pdev);
/* remove_inta_isr - release the legacy INTA interrupt line. */
3972 static void remove_inta_isr(struct s2io_nic *sp)
3974 struct net_device *dev = sp->dev;
3976 free_irq(sp->pdev->irq, dev);
3979 /* ********************************************************* *
3980 * Functions defined below concern the OS part of the driver *
3981 * ********************************************************* */
3984 * s2io_open - open entry point of the driver
3985 * @dev : pointer to the device structure.
3987 * This function is the open entry point of the driver. It mainly calls a
3988 * function to allocate Rx buffers and inserts them into the buffer
3989 * descriptors and then enables the Rx part of the NIC.
3991 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3995 static int s2io_open(struct net_device *dev)
3997 struct s2io_nic *sp = dev->priv;
4001 * Make sure you have link off by default every time
4002 * Nic is initialized
4004 netif_carrier_off(dev);
4005 sp->last_link_state = 0;
4007 /* Initialize H/W and enable interrupts */
4008 err = s2io_card_up(sp);
4010 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4012 goto hw_init_failed;
/* Program the unicast MAC address; failure is fatal for open. */
4015 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4016 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4019 goto hw_init_failed;
4021 s2io_start_all_tx_queue(sp);
/* Error path: release MSI-X tables allocated during card bring-up. */
4025 if (sp->config.intr_type == MSI_X) {
4028 sp->mac_control.stats_info->sw_stat.mem_freed
4029 += (sp->num_entries * sizeof(struct msix_entry));
4031 if (sp->s2io_entries) {
4032 kfree(sp->s2io_entries);
4033 sp->mac_control.stats_info->sw_stat.mem_freed
4034 += (sp->num_entries * sizeof(struct s2io_msix_entry));
4041 * s2io_close -close entry point of the driver
4042 * @dev : device pointer.
4044 * This is the stop entry point of the driver. It needs to undo exactly
4045 * whatever was done by the open entry point,thus it's usually referred to
4046 * as the close function.Among other things this function mainly stops the
4047 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4049 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4053 static int s2io_close(struct net_device *dev)
4055 struct s2io_nic *sp = dev->priv;
4056 struct config_param *config = &sp->config;
4060 /* Return if the device is already closed *
4061 * Can happen when s2io_card_up failed in change_mtu *
4063 if (!is_s2io_card_up(sp))
4066 s2io_stop_all_tx_queue(sp);
4067 /* delete all populated mac entries */
/* Slot 0 is the primary address; only secondary entries are removed. */
4068 for (offset = 1; offset < config->max_mc_addr; offset++) {
4069 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4070 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4071 do_s2io_delete_unicast_mc(sp, tmp64);
4080 * s2io_xmit - Tx entry point of the driver
4081 * @skb : the socket buffer containing the Tx data.
4082 * @dev : device pointer.
4084 * This function is the Tx entry point of the driver. S2IO NIC supports
4085 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4086 * NOTE: when device can't queue the pkt, just the trans_start variable will
4089 * 0 on success & 1 on failure.
4092 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4094 struct s2io_nic *sp = dev->priv;
4095 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4098 struct TxFIFO_element __iomem *tx_fifo;
4099 unsigned long flags = 0;
4101 struct fifo_info *fifo = NULL;
4102 struct mac_info *mac_control;
4103 struct config_param *config;
4104 int do_spin_lock = 1;
4106 int enable_per_list_interrupt = 0;
4107 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4109 mac_control = &sp->mac_control;
4110 config = &sp->config;
4112 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
/* Drop empty skbs and bail out while the card is resetting. */
4114 if (unlikely(skb->len <= 0)) {
4115 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4116 dev_kfree_skb_any(skb);
4120 if (!is_s2io_card_up(sp)) {
4121 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4128 if (sp->vlgrp && vlan_tx_tag_present(skb))
4129 vlan_tag = vlan_tx_tag_get(skb);
/*
 * FIFO (queue) selection: default steering hashes TCP/UDP source port
 * into the per-protocol fifo group; priority steering maps
 * skb->priority through config->fifo_mapping.
 */
4130 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4131 if (skb->protocol == htons(ETH_P_IP)) {
4136 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4137 th = (struct tcphdr *)(((unsigned char *)ip) +
4140 if (ip->protocol == IPPROTO_TCP) {
4141 queue_len = sp->total_tcp_fifos;
4142 queue = (ntohs(th->source) +
4144 sp->fifo_selector[queue_len - 1];
4145 if (queue >= queue_len)
4146 queue = queue_len - 1;
4147 } else if (ip->protocol == IPPROTO_UDP) {
4148 queue_len = sp->total_udp_fifos;
4149 queue = (ntohs(th->source) +
4151 sp->fifo_selector[queue_len - 1];
4152 if (queue >= queue_len)
4153 queue = queue_len - 1;
4154 queue += sp->udp_fifo_idx;
4155 if (skb->len > 1024)
4156 enable_per_list_interrupt = 1;
4161 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4162 /* get fifo number based on skb->priority value */
4163 queue = config->fifo_mapping
4164 [skb->priority & (MAX_TX_FIFOS - 1)];
4165 fifo = &mac_control->fifos[queue];
4168 spin_lock_irqsave(&fifo->tx_lock, flags);
4170 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4171 return NETDEV_TX_LOCKED;
/* Back off if the chosen queue (multiqueue or legacy) is stopped. */
4174 if (sp->config.multiq) {
4175 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4176 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4177 return NETDEV_TX_BUSY;
4179 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4180 if (netif_queue_stopped(dev)) {
4181 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4182 return NETDEV_TX_BUSY;
4186 put_off = (u16) fifo->tx_curr_put_info.offset;
4187 get_off = (u16) fifo->tx_curr_get_info.offset;
4188 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
4190 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4191 /* Avoid "put" pointer going beyond "get" pointer */
4192 if (txdp->Host_Control ||
4193 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4194 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4195 s2io_stop_tx_queue(sp, fifo->fifo_no);
4197 spin_unlock_irqrestore(&fifo->tx_lock, flags);
/* Fill descriptor 0: offload flags, ownership, interrupt routing, VLAN. */
4201 offload_type = s2io_offload_type(skb);
4202 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4203 txdp->Control_1 |= TXD_TCP_LSO_EN;
4204 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4206 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4208 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4211 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4212 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4213 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4214 if (enable_per_list_interrupt)
4215 if (put_off & (queue_len >> 5))
4216 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4218 txdp->Control_2 |= TXD_VLAN_ENABLE;
4219 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4222 frg_len = skb->len - skb->data_len;
/* UFO: descriptor 0 carries an 8-byte in-band header with the frag id. */
4223 if (offload_type == SKB_GSO_UDP) {
4226 ufo_size = s2io_udp_mss(skb);
4228 txdp->Control_1 |= TXD_UFO_EN;
4229 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4230 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4232 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4233 fifo->ufo_in_band_v[put_off] =
4234 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4236 fifo->ufo_in_band_v[put_off] =
4237 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4239 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4240 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4241 fifo->ufo_in_band_v,
4242 sizeof(u64), PCI_DMA_TODEVICE);
4243 if (pci_dma_mapping_error(txdp->Buffer_Pointer))
4244 goto pci_map_failed;
/* Map the linear part of the skb into the (next) descriptor. */
4248 txdp->Buffer_Pointer = pci_map_single
4249 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4250 if (pci_dma_mapping_error(txdp->Buffer_Pointer))
4251 goto pci_map_failed;
4253 txdp->Host_Control = (unsigned long) skb;
4254 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4255 if (offload_type == SKB_GSO_UDP)
4256 txdp->Control_1 |= TXD_UFO_EN;
4258 frg_cnt = skb_shinfo(skb)->nr_frags;
4259 /* For fragmented SKB. */
4260 for (i = 0; i < frg_cnt; i++) {
4261 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4262 /* A '0' length fragment will be ignored */
4266 txdp->Buffer_Pointer = (u64) pci_map_page
4267 (sp->pdev, frag->page, frag->page_offset,
4268 frag->size, PCI_DMA_TODEVICE);
4269 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4270 if (offload_type == SKB_GSO_UDP)
4271 txdp->Control_1 |= TXD_UFO_EN;
4273 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4275 if (offload_type == SKB_GSO_UDP)
4276 frg_cnt++; /* as Txd0 was used for inband header */
/* Hand the TxD list to the hardware FIFO and ring the doorbell. */
4278 tx_fifo = mac_control->tx_FIFO_start[queue];
4279 val64 = fifo->list_info[put_off].list_phy_addr;
4280 writeq(val64, &tx_fifo->TxDL_Pointer);
4282 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4285 val64 |= TX_FIFO_SPECIAL_FUNC;
4287 writeq(val64, &tx_fifo->List_Control);
/* Advance the put pointer, wrapping at the end of the ring. */
4292 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4294 fifo->tx_curr_put_info.offset = put_off;
4296 /* Avoid "put" pointer going beyond "get" pointer */
4297 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4298 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4300 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4302 s2io_stop_tx_queue(sp, fifo->fifo_no);
4304 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4305 dev->trans_start = jiffies;
4306 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4308 if (sp->config.intr_type == MSI_X)
4309 tx_intr_handler(fifo);
/* pci_map_failed: account the drop, stop the queue and free the skb. */
4313 stats->pci_map_fail_cnt++;
4314 s2io_stop_tx_queue(sp, fifo->fifo_no);
4315 stats->mem_freed += skb->truesize;
4317 spin_unlock_irqrestore(&fifo->tx_lock, flags);
/*
 * s2io_alarm_handle - periodic alarm timer callback.
 * @data: the s2io_nic pointer cast to unsigned long
 * Runs the error/alarm scan and re-arms itself every HZ/2 jiffies.
 */
4322 s2io_alarm_handle(unsigned long data)
4324 struct s2io_nic *sp = (struct s2io_nic *)data;
4325 struct net_device *dev = sp->dev;
4327 s2io_handle_errors(dev);
4328 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/*
 * s2io_msix_ring_handle - MSI-X handler for a single rx ring.
 * @irq: vector number (unused here)
 * @dev_id: the ring_info this vector was registered with
 * NAPI mode: mask this ring's vector in xmsi_mask_reg and schedule the
 * ring's NAPI context. Non-NAPI: process rx completions inline and
 * replenish buffers.
 */
4331 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4333 struct ring_info *ring = (struct ring_info *)dev_id;
4334 struct s2io_nic *sp = ring->nic;
4335 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4336 struct net_device *dev = sp->dev;
4338 if (unlikely(!is_s2io_card_up(sp)))
4341 if (sp->config.napi) {
4342 u8 __iomem *addr = NULL;
/* Byte-address this ring's mask bit within the 64-bit xmsi_mask_reg. */
4345 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4346 addr += (7 - ring->ring_no);
4347 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4350 netif_rx_schedule(dev, &ring->napi);
4352 rx_intr_handler(ring, 0);
4353 s2io_chk_rx_buffers(ring);
/*
 * s2io_msix_fifo_handle - MSI-X handler for the alarm/Tx vector.
 * @irq: vector number (unused here)
 * @dev_id: pointer to the first element of the fifo_info array
 * Masks all interrupts, acknowledges Tx traffic, runs the Tx completion
 * handler on every FIFO, then restores the saved interrupt mask.
 */
4359 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4362 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4363 struct s2io_nic *sp = fifos->nic;
4364 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4365 struct config_param *config = &sp->config;
4368 if (unlikely(!is_s2io_card_up(sp)))
4371 reason = readq(&bar0->general_int_status);
4372 if (unlikely(reason == S2IO_MINUS_ONE))
4373 /* Nothing much can be done. Get out */
4376 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4378 if (reason & GEN_INTR_TXTRAFFIC)
4379 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4381 for (i = 0; i < config->tx_fifo_num; i++)
4382 tx_intr_handler(&fifos[i]);
/* Restore the interrupt mask; the read flushes the posted write. */
4384 writeq(sp->general_int_mask, &bar0->general_int_mask);
4385 readl(&bar0->general_int_status);
/*
 * s2io_txpic_intr_handle - service TxPIC/GPIO interrupts (link changes).
 * @sp: device private structure
 * Decodes GPIO link up/down interrupt bits and updates carrier state,
 * adapter enable and the activity LED; masks/unmasks the opposite link
 * interrupt so only the relevant transition fires next.
 */
4390 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4392 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4395 val64 = readq(&bar0->pic_int_status);
4396 if (val64 & PIC_INT_GPIO) {
4397 val64 = readq(&bar0->gpio_int_reg);
4398 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4399 (val64 & GPIO_INT_REG_LINK_UP)) {
4401 * This is unstable state so clear both up/down
4402 * interrupt and adapter to re-evaluate the link state.
4404 val64 |= GPIO_INT_REG_LINK_DOWN;
4405 val64 |= GPIO_INT_REG_LINK_UP;
4406 writeq(val64, &bar0->gpio_int_reg);
4407 val64 = readq(&bar0->gpio_int_mask);
4408 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4409 GPIO_INT_MASK_LINK_DOWN);
4410 writeq(val64, &bar0->gpio_int_mask);
4412 else if (val64 & GPIO_INT_REG_LINK_UP) {
4413 val64 = readq(&bar0->adapter_status);
4414 /* Enable Adapter */
4415 val64 = readq(&bar0->adapter_control);
4416 val64 |= ADAPTER_CNTL_EN;
4417 writeq(val64, &bar0->adapter_control);
4418 val64 |= ADAPTER_LED_ON;
4419 writeq(val64, &bar0->adapter_control);
4420 if (!sp->device_enabled_once)
4421 sp->device_enabled_once = 1;
4423 s2io_link(sp, LINK_UP);
4425 * unmask link down interrupt and mask link-up
4428 val64 = readq(&bar0->gpio_int_mask);
4429 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4430 val64 |= GPIO_INT_MASK_LINK_UP;
4431 writeq(val64, &bar0->gpio_int_mask);
4433 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4434 val64 = readq(&bar0->adapter_status);
4435 s2io_link(sp, LINK_DOWN);
4436 /* Link is down so unmask link up interrupt */
4437 val64 = readq(&bar0->gpio_int_mask);
4438 val64 &= ~GPIO_INT_MASK_LINK_UP;
4439 val64 |= GPIO_INT_MASK_LINK_DOWN;
4440 writeq(val64, &bar0->gpio_int_mask);
/* Turn the activity LED off on link down. */
4443 val64 = readq(&bar0->adapter_control);
4444 val64 = val64 &(~ADAPTER_LED_ON);
4445 writeq(val64, &bar0->adapter_control);
4448 val64 = readq(&bar0->gpio_int_mask);
4452 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4453 * @value: alarm bits
4454 * @addr: address value
4455 * @cnt: counter variable
4456 * Description: Check for alarm and increment the counter
4458 * 1 - if alarm bit set
4459 * 0 - if alarm bit is not set
4461 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4462 unsigned long long *cnt)
4465 val64 = readq(addr);
4466 if ( val64 & value ) {
/* Write-to-clear: writing the read value back acknowledges the alarm. */
4467 writeq(val64, addr);
4476 * s2io_handle_errors - Xframe error indication handler
4477 * @nic: device private variable
4478 * Description: Handle alarms such as loss of link, single or
4479 * double ECC errors, critical and serious errors.
4483 static void s2io_handle_errors(void * dev_id)
4485 struct net_device *dev = (struct net_device *) dev_id;
4486 struct s2io_nic *sp = dev->priv;
4487 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4488 u64 temp64 = 0,val64=0;
4491 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4492 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4494 if (!is_s2io_card_up(sp))
4497 if (pci_channel_offline(sp->pdev))
4500 memset(&sw_stat->ring_full_cnt, 0,
4501 sizeof(sw_stat->ring_full_cnt));
4503 /* Handling the XPAK counters update */
4504 if(stats->xpak_timer_count < 72000) {
4505 /* waiting for an hour */
4506 stats->xpak_timer_count++;
4508 s2io_updt_xpak_counter(dev);
4509 /* reset the count to zero */
4510 stats->xpak_timer_count = 0;
4513 /* Handling link status change error Intr */
4514 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4515 val64 = readq(&bar0->mac_rmac_err_reg);
4516 writeq(val64, &bar0->mac_rmac_err_reg);
4517 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4518 schedule_work(&sp->set_link_task);
4521 /* In case of a serious error, the device will be Reset. */
4522 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4523 &sw_stat->serious_err_cnt))
4526 /* Check for data parity error */
4527 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4528 &sw_stat->parity_err_cnt))
4531 /* Check for ring full counter */
/* Xframe II keeps 8 16-bit ring-bump counters packed in two registers. */
4532 if (sp->device_type == XFRAME_II_DEVICE) {
4533 val64 = readq(&bar0->ring_bump_counter1);
4534 for (i=0; i<4; i++) {
4535 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4536 temp64 >>= 64 - ((i+1)*16);
4537 sw_stat->ring_full_cnt[i] += temp64;
4540 val64 = readq(&bar0->ring_bump_counter2);
4541 for (i=0; i<4; i++) {
4542 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4543 temp64 >>= 64 - ((i+1)*16);
4544 sw_stat->ring_full_cnt[i+4] += temp64;
/* Tx-side DMA error sources: fatal bits branch to reset, soft bits count. */
4548 val64 = readq(&bar0->txdma_int_status);
4549 /*check for pfc_err*/
4550 if (val64 & TXDMA_PFC_INT) {
4551 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4552 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4553 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4554 &sw_stat->pfc_err_cnt))
4556 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4557 &sw_stat->pfc_err_cnt);
4560 /*check for tda_err*/
4561 if (val64 & TXDMA_TDA_INT) {
4562 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4563 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4564 &sw_stat->tda_err_cnt))
4566 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4567 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4569 /*check for pcc_err*/
4570 if (val64 & TXDMA_PCC_INT) {
4571 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4572 | PCC_N_SERR | PCC_6_COF_OV_ERR
4573 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4574 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4575 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4576 &sw_stat->pcc_err_cnt))
4578 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4579 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4582 /*check for tti_err*/
4583 if (val64 & TXDMA_TTI_INT) {
4584 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4585 &sw_stat->tti_err_cnt))
4587 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4588 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4591 /*check for lso_err*/
4592 if (val64 & TXDMA_LSO_INT) {
4593 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4594 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4595 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4597 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4598 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4601 /*check for tpa_err*/
4602 if (val64 & TXDMA_TPA_INT) {
4603 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4604 &sw_stat->tpa_err_cnt))
4606 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4607 &sw_stat->tpa_err_cnt);
4610 /*check for sm_err*/
4611 if (val64 & TXDMA_SM_INT) {
4612 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4613 &sw_stat->sm_err_cnt))
/* MAC and XGXS Tx-side error sources. */
4617 val64 = readq(&bar0->mac_int_status);
4618 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4619 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4620 &bar0->mac_tmac_err_reg,
4621 &sw_stat->mac_tmac_err_cnt))
4623 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4624 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4625 &bar0->mac_tmac_err_reg,
4626 &sw_stat->mac_tmac_err_cnt);
4629 val64 = readq(&bar0->xgxs_int_status);
4630 if (val64 & XGXS_INT_STATUS_TXGXS) {
4631 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4632 &bar0->xgxs_txgxs_err_reg,
4633 &sw_stat->xgxs_txgxs_err_cnt))
4635 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4636 &bar0->xgxs_txgxs_err_reg,
4637 &sw_stat->xgxs_txgxs_err_cnt);
/* Rx-side DMA error sources. */
4640 val64 = readq(&bar0->rxdma_int_status);
4641 if (val64 & RXDMA_INT_RC_INT_M) {
4642 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4643 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4644 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4646 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4647 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4648 &sw_stat->rc_err_cnt);
4649 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4650 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4651 &sw_stat->prc_pcix_err_cnt))
4653 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4654 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4655 &sw_stat->prc_pcix_err_cnt);
4658 if (val64 & RXDMA_INT_RPA_INT_M) {
4659 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4660 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4662 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4663 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4666 if (val64 & RXDMA_INT_RDA_INT_M) {
4667 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4668 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4669 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4670 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4672 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4673 | RDA_MISC_ERR | RDA_PCIX_ERR,
4674 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4677 if (val64 & RXDMA_INT_RTI_INT_M) {
4678 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4679 &sw_stat->rti_err_cnt))
4681 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4682 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
/* MAC and XGXS Rx-side error sources. */
4685 val64 = readq(&bar0->mac_int_status);
4686 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4687 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4688 &bar0->mac_rmac_err_reg,
4689 &sw_stat->mac_rmac_err_cnt))
4691 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4692 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4693 &sw_stat->mac_rmac_err_cnt);
4696 val64 = readq(&bar0->xgxs_int_status);
4697 if (val64 & XGXS_INT_STATUS_RXGXS) {
4698 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4699 &bar0->xgxs_rxgxs_err_reg,
4700 &sw_stat->xgxs_rxgxs_err_cnt))
/* Memory controller errors, including single/double-bit ECC handling. */
4704 val64 = readq(&bar0->mc_int_status);
4705 if(val64 & MC_INT_STATUS_MC_INT) {
4706 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4707 &sw_stat->mc_err_cnt))
4710 /* Handling Ecc errors */
4711 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4712 writeq(val64, &bar0->mc_err_reg);
4713 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4714 sw_stat->double_ecc_errs++;
4715 if (sp->device_type != XFRAME_II_DEVICE) {
4717 * Reset XframeI only if critical error
4720 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4721 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4725 sw_stat->single_ecc_errs++;
/* reset path: stop Tx, schedule the reset task, count the soft reset. */
4731 s2io_stop_all_tx_queue(sp);
4732 schedule_work(&sp->rst_timer_task);
4733 sw_stat->soft_reset_cnt++;
4738 * s2io_isr - ISR handler of the device .
4739 * @irq: the irq of the device.
4740 * @dev_id: a void pointer to the dev structure of the NIC.
4741 * Description: This function is the ISR handler of the device. It
4742 * identifies the reason for the interrupt and calls the relevant
4743 * service routines. As a contingency measure, this ISR allocates the
4744 * recv buffers, if their numbers are below the panic value which is
4745 * presently set to 25% of the original number of rcv buffers allocated.
4747 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4748 * IRQ_NONE: will be returned if interrupt is not from our device
4750 static irqreturn_t s2io_isr(int irq, void *dev_id)
4752 struct net_device *dev = (struct net_device *) dev_id;
4753 struct s2io_nic *sp = dev->priv;
4754 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4757 struct mac_info *mac_control;
4758 struct config_param *config;
4760 /* Pretend we handled any irq's from a disconnected card */
4761 if (pci_channel_offline(sp->pdev))
4764 if (!is_s2io_card_up(sp))
4767 mac_control = &sp->mac_control;
4768 config = &sp->config;
4771 * Identify the cause for interrupt and call the appropriate
4772 * interrupt handler. Causes for the interrupt could be;
4777 reason = readq(&bar0->general_int_status);
4779 if (unlikely(reason == S2IO_MINUS_ONE) ) {
4780 /* Nothing much can be done. Get out */
4784 if (reason & (GEN_INTR_RXTRAFFIC |
4785 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
/* Mask all interrupts while servicing this one. */
4787 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4790 if (reason & GEN_INTR_RXTRAFFIC) {
4791 netif_rx_schedule(dev, &sp->napi);
4792 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4793 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4794 readl(&bar0->rx_traffic_int);
4798 * rx_traffic_int reg is an R1 register, writing all 1's
4799 * will ensure that the actual interrupt causing bit
4800 * get's cleared and hence a read can be avoided.
4802 if (reason & GEN_INTR_RXTRAFFIC)
4803 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4805 for (i = 0; i < config->rx_ring_num; i++)
4806 rx_intr_handler(&mac_control->rings[i], 0);
4810 * tx_traffic_int reg is an R1 register, writing all 1's
4811 * will ensure that the actual interrupt causing bit get's
4812 * cleared and hence a read can be avoided.
4814 if (reason & GEN_INTR_TXTRAFFIC)
4815 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4817 for (i = 0; i < config->tx_fifo_num; i++)
4818 tx_intr_handler(&mac_control->fifos[i]);
4820 if (reason & GEN_INTR_TXPIC)
4821 s2io_txpic_intr_handle(sp);
4824 * Reallocate the buffers from the interrupt handler itself.
4826 if (!config->napi) {
4827 for (i = 0; i < config->rx_ring_num; i++)
4828 s2io_chk_rx_buffers(&mac_control->rings[i]);
/* Restore the interrupt mask; the read flushes the posted write. */
4830 writeq(sp->general_int_mask, &bar0->general_int_mask);
4831 readl(&bar0->general_int_status);
4837 /* The interrupt was not raised by us */
/* Trigger a one-shot hardware statistics update via stat_cfg and poll
 * bit 0 for completion; does nothing when the card is down.
 */
4847 static void s2io_updt_stats(struct s2io_nic *sp)
4849 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4853 if (is_s2io_card_up(sp)) {
4854 /* Apprx 30us on a 133 MHz bus */
4855 val64 = SET_UPDT_CLICKS(10) |
4856 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4857 writeq(val64, &bar0->stat_cfg);
/* Poll until the hardware clears the busy bit (bit 0). */
4860 val64 = readq(&bar0->stat_cfg);
4861 if (!(val64 & s2BIT(0)))
4865 break; /* Updt failed */
4871 * s2io_get_stats - Updates the device statistics structure.
4872 * @dev : pointer to the device structure.
4874 * This function updates the device statistics structure in the s2io_nic
4875 * structure and returns a pointer to the same.
4877 * pointer to the updated net_device_stats structure.
4880 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4882 struct s2io_nic *sp = dev->priv;
4883 struct mac_info *mac_control;
4884 struct config_param *config;
4888 mac_control = &sp->mac_control;
4889 config = &sp->config;
4891 /* Configure Stats for immediate updt */
4892 s2io_updt_stats(sp);
/* Fold selected hardware counters into the kernel stats structure.
 * NOTE(review): tmac counters are converted as 32-bit LE, the rmac
 * drop/long counters as 64-bit — assumed to mirror the layout of the
 * hardware stats block; confirm against the stats_info definition.
 */
4894 sp->stats.tx_packets =
4895 le32_to_cpu(mac_control->stats_info->tmac_frms);
4896 sp->stats.tx_errors =
4897 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4898 sp->stats.rx_errors =
4899 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4900 sp->stats.multicast =
4901 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4902 sp->stats.rx_length_errors =
4903 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4905 /* collect per-ring rx_packets and rx_bytes */
4906 sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4907 for (i = 0; i < config->rx_ring_num; i++) {
4908 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4909 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4912 return (&sp->stats);
4916 * s2io_set_multicast - entry point for multicast address enable/disable.
4917 * @dev : pointer to the device structure
4919 * This function is a driver entry point which gets called by the kernel
4920 * whenever multicast addresses must be enabled/disabled. This also gets
4921 * called to set/reset promiscuous mode. Depending on the device flag, we
4922 * determine, if multicast address must be enabled or if promiscuous mode
4923 * is to be disabled etc.
4928 static void s2io_set_multicast(struct net_device *dev)
4931 struct dev_mc_list *mclist;
4932 struct s2io_nic *sp = dev->priv;
4933 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4934 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4936 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4938 struct config_param *config = &sp->config;
/* Every CAM update below follows the same pattern: write data0/data1,
 * issue a write-enable + strobe command through rmac_addr_cmd_mem,
 * then poll for command completion.
 */
4940 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4941 /* Enable all Multicast addresses */
4942 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4943 &bar0->rmac_addr_data0_mem);
4944 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4945 &bar0->rmac_addr_data1_mem);
4946 val64 = RMAC_ADDR_CMD_MEM_WE |
4947 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4948 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4949 writeq(val64, &bar0->rmac_addr_cmd_mem);
4950 /* Wait till command completes */
4951 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4952 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
/* Remember which CAM slot holds the all-multi filter. */
4956 sp->all_multi_pos = config->max_mc_addr - 1;
4957 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4958 /* Disable all Multicast addresses */
4959 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4960 &bar0->rmac_addr_data0_mem);
4961 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4962 &bar0->rmac_addr_data1_mem);
4963 val64 = RMAC_ADDR_CMD_MEM_WE |
4964 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4965 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4966 writeq(val64, &bar0->rmac_addr_cmd_mem);
4967 /* Wait till command completes */
4968 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4969 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4973 sp->all_multi_pos = 0;
4976 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4977 /* Put the NIC into promiscuous mode */
4978 add = &bar0->mac_cfg;
4979 val64 = readq(&bar0->mac_cfg);
4980 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: each 32-bit half-write must be
 * preceded by writing the unlock key to rmac_cfg_key.
 */
4982 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4983 writel((u32) val64, add);
4984 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4985 writel((u32) (val64 >> 32), (add + 4));
4987 if (vlan_tag_strip != 1) {
4988 val64 = readq(&bar0->rx_pa_cfg);
4989 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4990 writeq(val64, &bar0->rx_pa_cfg);
4991 vlan_strip_flag = 0;
4994 val64 = readq(&bar0->mac_cfg);
4995 sp->promisc_flg = 1;
4996 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4998 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4999 /* Remove the NIC from promiscuous mode */
5000 add = &bar0->mac_cfg;
5001 val64 = readq(&bar0->mac_cfg);
5002 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5004 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5005 writel((u32) val64, add);
5006 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5007 writel((u32) (val64 >> 32), (add + 4));
5009 if (vlan_tag_strip != 0) {
5010 val64 = readq(&bar0->rx_pa_cfg);
5011 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5012 writeq(val64, &bar0->rx_pa_cfg);
5013 vlan_strip_flag = 1;
5016 val64 = readq(&bar0->mac_cfg);
5017 sp->promisc_flg = 0;
5018 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5022 /* Update individual M_CAST address list */
5023 if ((!sp->m_cast_flg) && dev->mc_count) {
5025 (config->max_mc_addr - config->max_mac_addr)) {
5026 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5028 DBG_PRINT(ERR_DBG, "can be added, please enable ");
5029 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5033 prev_cnt = sp->mc_addr_count;
5034 sp->mc_addr_count = dev->mc_count;
5036 /* Clear out the previous list of Mc in the H/W. */
5037 for (i = 0; i < prev_cnt; i++) {
5038 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5039 &bar0->rmac_addr_data0_mem);
5040 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5041 &bar0->rmac_addr_data1_mem);
5042 val64 = RMAC_ADDR_CMD_MEM_WE |
5043 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5044 RMAC_ADDR_CMD_MEM_OFFSET
5045 (config->mc_start_offset + i);
5046 writeq(val64, &bar0->rmac_addr_cmd_mem);
5048 /* Wait for command completes */
5049 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5050 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5052 DBG_PRINT(ERR_DBG, "%s: Adding ",
5054 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5059 /* Create the new Rx filter list and update the same in H/W. */
5060 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5061 i++, mclist = mclist->next) {
5062 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into a u64 for the CAM data word. */
5065 for (j = 0; j < ETH_ALEN; j++) {
5066 mac_addr |= mclist->dmi_addr[j];
5070 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5071 &bar0->rmac_addr_data0_mem);
5072 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5073 &bar0->rmac_addr_data1_mem);
5074 val64 = RMAC_ADDR_CMD_MEM_WE |
5075 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5076 RMAC_ADDR_CMD_MEM_OFFSET
5077 (i + config->mc_start_offset);
5078 writeq(val64, &bar0->rmac_addr_cmd_mem);
5080 /* Wait for command completes */
5081 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5082 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5084 DBG_PRINT(ERR_DBG, "%s: Adding ",
5086 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5093 /* read from CAM unicast & multicast addresses and store it in
5094 * def_mac_addr structure
5096 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5100 struct config_param *config = &sp->config;
5102 /* store unicast & multicast mac addresses */
/* Walk every CAM slot (unicast entries first, then multicast) and
 * mirror each into the software def_mac_addr copy.
 */
5103 for (offset = 0; offset < config->max_mc_addr; offset++) {
5104 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5105 /* if read fails disable the entry */
5106 if (mac_addr == FAILURE)
5107 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5108 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5112 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
/* Re-program the whole CAM from the software copy in def_mac_addr
 * (presumably used after a device reset — confirm against callers).
 */
5113 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5116 struct config_param *config = &sp->config;
5117 /* restore unicast mac address */
5118 for (offset = 0; offset < config->max_mac_addr; offset++)
5119 do_s2io_prog_unicast(sp->dev,
5120 sp->def_mac_addr[offset].mac_addr);
5122 /* restore multicast mac address */
5123 for (offset = config->mc_start_offset;
5124 offset < config->max_mc_addr; offset++)
5125 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5128 /* add a multicast MAC address to CAM */
5129 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5133 struct config_param *config = &sp->config;
/* Pack the 6-byte address into a u64; all-zero and the disable
 * sentinel are not programmable addresses.
 */
5135 for (i = 0; i < ETH_ALEN; i++) {
5137 mac_addr |= addr[i];
5139 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5142 /* check if the multicast mac already preset in CAM */
/* First empty slot (disable sentinel) or an exact match ends the scan. */
5143 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5145 tmp64 = do_s2io_read_unicast_mc(sp, i);
5146 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5149 if (tmp64 == mac_addr)
5152 if (i == config->max_mc_addr) {
5154 "CAM full no space left for multicast MAC\n");
5157 /* Update the internal structure with this new mac address */
5158 do_s2io_copy_mac_addr(sp, i, mac_addr);
5160 return (do_s2io_add_mac(sp, mac_addr, i));
5163 /* add MAC address to CAM */
/* Program one CAM slot: write the packed address into data0, issue a
 * write-enable strobe at the given offset, then wait for completion.
 */
5164 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5167 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5169 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5170 &bar0->rmac_addr_data0_mem);
5173 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5174 RMAC_ADDR_CMD_MEM_OFFSET(off);
5175 writeq(val64, &bar0->rmac_addr_cmd_mem);
5177 /* Wait till command completes */
5178 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5179 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5181 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5186 /* deletes a specified unicast/multicast mac entry from CAM */
5187 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5190 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5191 struct config_param *config = &sp->config;
/* Scan the CAM; on a match, overwrite the slot with the disable
 * pattern and refresh the software copy of the address list.
 */
5194 offset < config->max_mc_addr; offset++) {
5195 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5196 if (tmp64 == addr) {
5197 /* disable the entry by writing 0xffffffffffffULL */
5198 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5200 /* store the new mac list from CAM */
5201 do_s2io_store_unicast_mc(sp);
5205 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5206 (unsigned long long)addr);
5210 /* read mac entries from CAM */
5211 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
/* tmp64 starts as the failure pattern; on success it is replaced by the
 * data0 register value. The 48-bit MAC lives in the upper bits, hence
 * the >> 16 on return.
 */
5213 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5214 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5218 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5219 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5220 writeq(val64, &bar0->rmac_addr_cmd_mem);
5222 /* Wait till command completes */
5223 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5224 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5226 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5229 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5230 return (tmp64 >> 16);
5234 * s2io_set_mac_addr driver entry point
/* set_mac_address entry point: validate the new address, copy it into
 * dev->dev_addr, then program it into the unicast CAM.
 */
5237 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5239 struct sockaddr *addr = p;
5241 if (!is_valid_ether_addr(addr->sa_data))
5244 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5246 /* store the MAC address in CAM */
5247 return (do_s2io_prog_unicast(dev, dev->dev_addr));
5250 * do_s2io_prog_unicast - Programs the Xframe mac address
5251 * @dev : pointer to the device structure.
5252 * @addr: a uchar pointer to the new mac address which is to be set.
5253 * Description : This procedure will program the Xframe to receive
5254 * frames with new Mac Address
5255 * Return value: SUCCESS on success and an appropriate (-)ve integer
5256 * as defined in errno.h file on failure.
5259 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5261 struct s2io_nic *sp = dev->priv;
5262 register u64 mac_addr = 0, perm_addr = 0;
5265 struct config_param *config = &sp->config;
5268 * Set the new MAC address as the new unicast filter and reflect this
5269 * change on the device address registered with the OS. It will be
/* Pack both the requested address and the permanent (factory) address
 * into u64 form for comparison.
 */
5272 for (i = 0; i < ETH_ALEN; i++) {
5274 mac_addr |= addr[i];
5276 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5279 /* check if the dev_addr is different than perm_addr */
5280 if (mac_addr == perm_addr)
5283 /* check if the mac already preset in CAM */
/* Slot 0 is the permanent address, so the scan starts at 1; the first
 * empty slot (disable sentinel) is where the new address will go.
 */
5284 for (i = 1; i < config->max_mac_addr; i++) {
5285 tmp64 = do_s2io_read_unicast_mc(sp, i);
5286 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5289 if (tmp64 == mac_addr) {
5291 "MAC addr:0x%llx already present in CAM\n",
5292 (unsigned long long)mac_addr);
5296 if (i == config->max_mac_addr) {
5297 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5300 /* Update the internal structure with this new mac address */
5301 do_s2io_copy_mac_addr(sp, i, mac_addr);
5302 return (do_s2io_add_mac(sp, mac_addr, i));
5306 * s2io_ethtool_sset - Sets different link parameters.
5307 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5308 * @info: pointer to the structure with parameters given by ethtool to set
5311 * The function sets different link parameters provided by the user onto
5317 static int s2io_ethtool_sset(struct net_device *dev,
5318 struct ethtool_cmd *info)
5320 struct s2io_nic *sp = dev->priv;
/* Only forced 10G full-duplex is accepted; autoneg or any other
 * speed/duplex combination is rejected.
 */
5321 if ((info->autoneg == AUTONEG_ENABLE) ||
5322 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
/* Close/reopen the interface to apply the (fixed) link settings. */
5325 s2io_close(sp->dev);
5333 * s2io_ethtool_gset - Return link specific information.
5334 * @sp : private member of the device structure, pointer to the
5335 * s2io_nic structure.
5336 * @info : pointer to the structure with parameters given by ethtool
5337 * to return link information.
5339 * Returns link specific information like speed, duplex etc.. to ethtool.
5341 * return 0 on success.
5344 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5346 struct s2io_nic *sp = dev->priv;
5347 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): advertising is filled with SUPPORTED_* constants;
 * the ADVERTISED_* macros are the conventional ones here (they have
 * the same numeric values, so behavior is unchanged) — confirm.
 */
5348 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5349 info->port = PORT_FIBRE;
5351 /* info->transceiver */
5352 info->transceiver = XCVR_EXTERNAL;
/* Speed/duplex are only meaningful while the carrier is up. */
5354 if (netif_carrier_ok(sp->dev)) {
5355 info->speed = 10000;
5356 info->duplex = DUPLEX_FULL;
5362 info->autoneg = AUTONEG_DISABLE;
5367 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5368 * @sp : private member of the device structure, which is a pointer to the
5369 * s2io_nic structure.
5370 * @info : pointer to the structure with parameters given by ethtool to
5371 * return driver information.
5373 * Returns driver specific information like name, version etc.. to ethtool.
5378 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5379 struct ethtool_drvinfo *info)
5381 struct s2io_nic *sp = dev->priv;
/* NOTE(review): strncpy does not NUL-terminate when the source fills
 * the destination exactly — consider explicit termination if any of
 * these strings can reach the buffer size.
 */
5383 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5384 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5385 strncpy(info->fw_version, "", sizeof(info->fw_version));
5386 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5387 info->regdump_len = XENA_REG_SPACE;
5388 info->eedump_len = XENA_EEPROM_SPACE;
5392 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5393 * @sp: private member of the device structure, which is a pointer to the
5394 * s2io_nic structure.
5395 * @regs : pointer to the structure with parameters given by ethtool for
5396 * dumping the registers.
5397 * @reg_space: The input argument into which all the registers are dumped.
5399 * Dumps the entire register space of xFrame NIC into the user given
5405 static void s2io_ethtool_gregs(struct net_device *dev,
5406 struct ethtool_regs *regs, void *space)
5410 u8 *reg_space = (u8 *) space;
5411 struct s2io_nic *sp = dev->priv;
5413 regs->len = XENA_REG_SPACE;
5414 regs->version = sp->pdev->subsystem_device;
5416 for (i = 0; i < regs->len; i += 8) {
5417 reg = readq(sp->bar0 + i);
5418 memcpy((reg_space + i), ®, 8);
5423 * s2io_phy_id - timer function that alternates adapter LED.
5424 * @data : address of the private member of the device structure, which
5425 * is a pointer to the s2io_nic structure, provided as an u32.
5426 * Description: This is actually the timer function that alternates the
5427 * adapter LED bit of the adapter control bit to set/reset every time on
5428 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5429 * once every second.
5431 static void s2io_phy_id(unsigned long data)
5433 struct s2io_nic *sp = (struct s2io_nic *) data;
5434 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5438 subid = sp->pdev->subsystem_device;
/* Xframe II (or subsystem id >= 0x07) toggles the LED via GPIO 0;
 * older cards toggle the LED bit in adapter_control instead.
 */
5439 if ((sp->device_type == XFRAME_II_DEVICE) ||
5440 ((subid & 0xFF) >= 0x07)) {
5441 val64 = readq(&bar0->gpio_control);
5442 val64 ^= GPIO_CTRL_GPIO_0;
5443 writeq(val64, &bar0->gpio_control);
5445 val64 = readq(&bar0->adapter_control);
5446 val64 ^= ADAPTER_LED_ON;
5447 writeq(val64, &bar0->adapter_control);
/* Re-arm for the next half-second toggle. */
5450 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5454 * s2io_ethtool_idnic - To physically identify the nic on the system.
5455 * @sp : private member of the device structure, which is a pointer to the
5456 * s2io_nic structure.
5457 * @id : pointer to the structure with identification parameters given by
5459 * Description: Used to physically identify the NIC on the system.
5460 * The Link LED will blink for a time specified by the user for
5462 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5463 * identification is possible only if it's link is up.
5465 * int , returns 0 on success
5468 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5470 u64 val64 = 0, last_gpio_ctrl_val;
5471 struct s2io_nic *sp = dev->priv;
5472 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5475 subid = sp->pdev->subsystem_device;
/* Save the GPIO state so it can be restored after blinking. */
5476 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/* Old Xframe I boards can only drive the LED through adapter_control,
 * which requires the adapter (i.e. the link) to be enabled.
 */
5477 if ((sp->device_type == XFRAME_I_DEVICE) &&
5478 ((subid & 0xFF) < 0x07)) {
5479 val64 = readq(&bar0->adapter_control);
5480 if (!(val64 & ADAPTER_CNTL_EN)) {
5482 "Adapter Link down, cannot blink LED\n");
/* Lazily initialize the blink timer on first use. */
5486 if (sp->id_timer.function == NULL) {
5487 init_timer(&sp->id_timer);
5488 sp->id_timer.function = s2io_phy_id;
5489 sp->id_timer.data = (unsigned long) sp;
5491 mod_timer(&sp->id_timer, jiffies);
/* Blink for the user-requested duration (0 means the default cap). */
5493 msleep_interruptible(data * HZ);
5495 msleep_interruptible(MAX_FLICKER_TIME);
5496 del_timer_sync(&sp->id_timer);
5498 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5499 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5500 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/* Report ring sizing to ethtool: max descriptors depend on the Rx
 * descriptor mode; current counts are summed over all FIFOs/rings.
 */
5506 static void s2io_ethtool_gringparam(struct net_device *dev,
5507 struct ethtool_ringparam *ering)
5509 struct s2io_nic *sp = dev->priv;
5510 int i,tx_desc_count=0,rx_desc_count=0;
5512 if (sp->rxd_mode == RXD_MODE_1)
5513 ering->rx_max_pending = MAX_RX_DESC_1;
5514 else if (sp->rxd_mode == RXD_MODE_3B)
5515 ering->rx_max_pending = MAX_RX_DESC_2;
5517 ering->tx_max_pending = MAX_TX_DESC;
5518 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5519 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5521 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5522 ering->tx_pending = tx_desc_count;
5524 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5525 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5527 ering->rx_pending = rx_desc_count;
5529 ering->rx_mini_max_pending = 0;
5530 ering->rx_mini_pending = 0;
/* Jumbo figures mirror the regular Rx figures for this hardware. */
5531 if(sp->rxd_mode == RXD_MODE_1)
5532 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5533 else if (sp->rxd_mode == RXD_MODE_3B)
5534 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5535 ering->rx_jumbo_pending = rx_desc_count;
5539 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5540 * @sp : private member of the device structure, which is a pointer to the
5541 * s2io_nic structure.
5542 * @ep : pointer to the structure with pause parameters given by ethtool.
5544 * Returns the Pause frame generation and reception capability of the NIC.
5548 static void s2io_ethtool_getpause_data(struct net_device *dev,
5549 struct ethtool_pauseparam *ep)
5552 struct s2io_nic *sp = dev->priv;
5553 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Translate the rmac_pause_cfg bits into the ethtool pause flags;
 * autoneg of pause frames is never advertised by this hardware.
 */
5555 val64 = readq(&bar0->rmac_pause_cfg);
5556 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5557 ep->tx_pause = TRUE;
5558 if (val64 & RMAC_PAUSE_RX_ENABLE)
5559 ep->rx_pause = TRUE;
5560 ep->autoneg = FALSE;
5564 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5565 * @sp : private member of the device structure, which is a pointer to the
5566 * s2io_nic structure.
5567 * @ep : pointer to the structure with pause parameters given by ethtool.
5569 * It can be used to set or reset Pause frame generation or reception
5570 * support of the NIC.
5572 * int, returns 0 on Success
5575 static int s2io_ethtool_setpause_data(struct net_device *dev,
5576 struct ethtool_pauseparam *ep)
5579 struct s2io_nic *sp = dev->priv;
5580 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Read-modify-write the pause config: set or clear the generation
 * (tx) and reception (rx) enable bits per the ethtool request.
 */
5582 val64 = readq(&bar0->rmac_pause_cfg);
5584 val64 |= RMAC_PAUSE_GEN_ENABLE;
5586 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5588 val64 |= RMAC_PAUSE_RX_ENABLE;
5590 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5591 writeq(val64, &bar0->rmac_pause_cfg);
5596 * read_eeprom - reads 4 bytes of data from user given offset.
5597 * @sp : private member of the device structure, which is a pointer to the
5598 * s2io_nic structure.
5599 * @off : offset at which the data must be written
5600 * @data : Its an output parameter where the data read at the given
5603 * Will read 4 bytes of data from the user given offset and return the
5605 * NOTE: Will allow to read only part of the EEPROM visible through the
5608 * -1 on failure and 0 on success.
5611 #define S2IO_DEV_ID 5
5612 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5617 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Xframe I exposes the EEPROM over an I2C controller: issue a 4-byte
 * read command and poll for the END condition.
 */
5619 if (sp->device_type == XFRAME_I_DEVICE) {
5620 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5621 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5622 I2C_CONTROL_CNTL_START;
5623 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5625 while (exit_cnt < 5) {
5626 val64 = readq(&bar0->i2c_control);
5627 if (I2C_CONTROL_CNTL_END(val64)) {
5628 *data = I2C_CONTROL_GET_DATA(val64);
/* Xframe II uses an SPI controller instead: program the command, set
 * the REQ bit, then poll for DONE (or NACK on failure).
 */
5637 if (sp->device_type == XFRAME_II_DEVICE) {
5638 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5639 SPI_CONTROL_BYTECNT(0x3) |
5640 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5641 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5642 val64 |= SPI_CONTROL_REQ;
5643 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5644 while (exit_cnt < 5) {
5645 val64 = readq(&bar0->spi_control);
5646 if (val64 & SPI_CONTROL_NACK) {
5649 } else if (val64 & SPI_CONTROL_DONE) {
5650 *data = readq(&bar0->spi_data);
5663 * write_eeprom - actually writes the relevant part of the data value.
5664 * @sp : private member of the device structure, which is a pointer to the
5665 * s2io_nic structure.
5666 * @off : offset at which the data must be written
5667 * @data : The data that is to be written
5668 * @cnt : Number of bytes of the data that are actually to be written into
5669 * the Eeprom. (max of 3)
5671 * Actually writes the relevant part of the data value into the Eeprom
5672 * through the I2C bus.
5674 * 0 on success, -1 on failure.
5677 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5679 int exit_cnt = 0, ret = -1;
5681 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Xframe I: write via the I2C controller; success requires the END
 * condition without a NACK.
 */
5683 if (sp->device_type == XFRAME_I_DEVICE) {
5684 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5685 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5686 I2C_CONTROL_CNTL_START;
5687 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5689 while (exit_cnt < 5) {
5690 val64 = readq(&bar0->i2c_control);
5691 if (I2C_CONTROL_CNTL_END(val64)) {
5692 if (!(val64 & I2C_CONTROL_NACK))
/* Xframe II: write via SPI; a byte count of 8 is encoded as 0 in the
 * controller's BYTECNT field.
 */
5701 if (sp->device_type == XFRAME_II_DEVICE) {
5702 int write_cnt = (cnt == 8) ? 0 : cnt;
5703 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5705 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5706 SPI_CONTROL_BYTECNT(write_cnt) |
5707 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5708 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5709 val64 |= SPI_CONTROL_REQ;
5710 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5711 while (exit_cnt < 5) {
5712 val64 = readq(&bar0->spi_control);
5713 if (val64 & SPI_CONTROL_NACK) {
5716 } else if (val64 & SPI_CONTROL_DONE) {
/* Read the PCI VPD area via the VPD capability at config offset 0x80,
 * then extract the serial number ("SN" tag) and product name strings.
 */
5726 static void s2io_vpd_read(struct s2io_nic *nic)
5730 int i=0, cnt, fail = 0;
5731 int vpd_addr = 0x80;
5733 if (nic->device_type == XFRAME_II_DEVICE) {
5734 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5738 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5741 strcpy(nic->serial_num, "NOT AVAILABLE");
5743 vpd_data = kmalloc(256, GFP_KERNEL);
5745 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5748 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
/* VPD read protocol: write the address, poll the flag byte for
 * completion, then read 4 data bytes per iteration.
 */
5750 for (i = 0; i < 256; i +=4 ) {
5751 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5752 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5753 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5754 for (cnt = 0; cnt <5; cnt++) {
5756 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5761 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5765 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5766 (u32 *)&vpd_data[i]);
5770 /* read serial number of adapter */
/* NOTE(review): cnt runs to 255 while vpd_data[cnt+1]/[cnt+2] are
 * also read — the last iterations index past the 256-byte buffer;
 * the scan bound should likely be 256 - 3. Confirm.
 */
5771 for (cnt = 0; cnt < 256; cnt++) {
5772 if ((vpd_data[cnt] == 'S') &&
5773 (vpd_data[cnt+1] == 'N') &&
5774 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5775 memset(nic->serial_num, 0, VPD_STRING_LEN);
5776 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5783 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5784 memset(nic->product_name, 0, vpd_data[1]);
5785 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5788 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5792 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5793 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5794 * @eeprom : pointer to the user level structure provided by ethtool,
5795 * containing all relevant information.
5796 * @data_buf : user defined value to be written into Eeprom.
5797 * Description: Reads the values stored in the Eeprom at given offset
5798 * for a given length. Stores these values in the input argument data
5799 * buffer 'data_buf' and returns these to the caller (ethtool.)
5804 static int s2io_ethtool_geeprom(struct net_device *dev,
5805 struct ethtool_eeprom *eeprom, u8 * data_buf)
5809 struct s2io_nic *sp = dev->priv;
5811 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request so it never reads past the EEPROM window. */
5813 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5814 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
/* The EEPROM is read 4 bytes at a time into the user buffer. */
5816 for (i = 0; i < eeprom->len; i += 4) {
5817 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5818 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5822 memcpy((data_buf + i), &valid, 4);
5828 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5829 * @sp : private member of the device structure, which is a pointer to the
5830 * s2io_nic structure.
5831 * @eeprom : pointer to the user level structure provided by ethtool,
5832 * containing all relevant information.
5833 * @data_buf ; user defined value to be written into Eeprom.
5835 * Tries to write the user provided value in the Eeprom, at the offset
5836 * given by the user.
5838 * 0 on success, -EFAULT on failure.
5841 static int s2io_ethtool_seeprom(struct net_device *dev,
5842 struct ethtool_eeprom *eeprom,
5845 int len = eeprom->len, cnt = 0;
5846 u64 valid = 0, data;
5847 struct s2io_nic *sp = dev->priv;
/* The caller must echo back the magic composed of vendor/device id;
 * this guards against accidental EEPROM writes.
 */
5849 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5851 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5852 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Write one byte per iteration, positioned in the controller's
 * expected byte lane (shifted to bits 24..31).
 */
5858 data = (u32) data_buf[cnt] & 0x000000FF;
5860 valid = (u32) (data << 24);
5864 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5866 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5868 "write into the specified offset\n");
5879 * s2io_register_test - reads and writes into all clock domains.
5880 * @sp : private member of the device structure, which is a pointer to the
5881 * s2io_nic structure.
5882 * @data : variable that returns the result of each of the test conducted b
5885 * Read and write into all clock domains. The NIC has 3 clock domains,
5886 * see that registers in all the three regions are accessible.
5891 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5893 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5894 u64 val64 = 0, exp_val;
/* Read-only checks: each register below has a known power-on value in
 * a different clock domain; a mismatch marks the test failed.
 */
5897 val64 = readq(&bar0->pif_rd_swapper_fb);
5898 if (val64 != 0x123456789abcdefULL) {
5900 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5903 val64 = readq(&bar0->rmac_pause_cfg);
5904 if (val64 != 0xc000ffff00000000ULL) {
5906 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
/* rx_queue_cfg reset value differs between Xframe I and II. */
5909 val64 = readq(&bar0->rx_queue_cfg);
5910 if (sp->device_type == XFRAME_II_DEVICE)
5911 exp_val = 0x0404040404040404ULL;
5913 exp_val = 0x0808080808080808ULL;
5914 if (val64 != exp_val) {
5916 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5919 val64 = readq(&bar0->xgxs_efifo_cfg);
5920 if (val64 != 0x000000001923141EULL) {
5922 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write/read-back checks with alternating bit patterns. */
5925 val64 = 0x5A5A5A5A5A5A5A5AULL;
5926 writeq(val64, &bar0->xmsi_data);
5927 val64 = readq(&bar0->xmsi_data);
5928 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5930 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5933 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5934 writeq(val64, &bar0->xmsi_data);
5935 val64 = readq(&bar0->xmsi_data);
5936 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5938 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5946 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5947 * @sp : private member of the device structure, which is a pointer to the
5948 * s2io_nic structure.
5949 * @data:variable that returns the result of each of the test conducted by
5952 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5958 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5961 u64 ret_data, org_4F0, org_7F0;
5962 u8 saved_4F0 = 0, saved_7F0 = 0;
5963 struct net_device *dev = sp->dev;
5965 /* Test Write Error at offset 0 */
5966 /* Note that SPI interface allows write access to all areas
5967 * of EEPROM. Hence doing all negative testing only for Xframe I.
/* Negative test: a write to a protected offset is expected to FAIL,
 * so a successful write (return 0) marks the test failed.
 */
5969 if (sp->device_type == XFRAME_I_DEVICE)
5970 if (!write_eeprom(sp, 0, 0, 3))
5973 /* Save current values at offsets 0x4F0 and 0x7F0 */
5974 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5976 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5979 /* Test Write at offset 4f0 */
/* Positive test: write a known pattern, read it back, compare. */
5980 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5982 if (read_eeprom(sp, 0x4F0, &ret_data))
5985 if (ret_data != 0x012345) {
5986 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5987 "Data written %llx Data read %llx\n",
5988 dev->name, (unsigned long long)0x12345,
5989 (unsigned long long)ret_data);
5993 /* Reset the EEPROM data go FFFF */
5994 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5996 /* Test Write Request Error at offset 0x7c */
5997 if (sp->device_type == XFRAME_I_DEVICE)
5998 if (!write_eeprom(sp, 0x07C, 0, 3))
6001 /* Test Write Request at offset 0x7f0 */
6002 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6004 if (read_eeprom(sp, 0x7F0, &ret_data))
6007 if (ret_data != 0x012345) {
6008 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6009 "Data written %llx Data read %llx\n",
6010 dev->name, (unsigned long long)0x12345,
6011 (unsigned long long)ret_data);
6015 /* Reset the EEPROM data go FFFF */
6016 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
/* More negative tests against protected areas (Xframe I only). */
6018 if (sp->device_type == XFRAME_I_DEVICE) {
6019 /* Test Write Error at offset 0x80 */
6020 if (!write_eeprom(sp, 0x080, 0, 3))
6023 /* Test Write Error at offset 0xfc */
6024 if (!write_eeprom(sp, 0x0FC, 0, 3))
6027 /* Test Write Error at offset 0x100 */
6028 if (!write_eeprom(sp, 0x100, 0, 3))
6031 /* Test Write Error at offset 4ec */
6032 if (!write_eeprom(sp, 0x4EC, 0, 3))
6036 /* Restore values at offsets 0x4F0 and 0x7F0 */
6038 write_eeprom(sp, 0x4F0, org_4F0, 3);
6040 write_eeprom(sp, 0x7F0, org_7F0, 3);
6047 * s2io_bist_test - invokes the MemBist test of the card .
6048 * @sp : private member of the device structure, which is a pointer to the
6049 * s2io_nic structure.
6050 * @data:variable that returns the result of each of the test conducted by
6053 * This invokes the MemBist test of the card. We give around
6054 * 2 secs time for the Test to complete. If it's still not complete
6055 * within this peiod, we consider that the test failed.
6057 * 0 on success and -1 on failure.
6060 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6063 int cnt = 0, ret = -1;
6065 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6066 bist |= PCI_BIST_START;
6067 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6070 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6071 if (!(bist & PCI_BIST_START)) {
6072 *data = (bist & PCI_BIST_CODE_MASK);
6084 * s2io-link_test - verifies the link state of the nic
6085 * @sp ; private member of the device structure, which is a pointer to the
6086 * s2io_nic structure.
6087 * @data: variable that returns the result of each of the test conducted by
6090 * The function verifies the link state of the NIC and updates the input
6091 * argument 'data' appropriately.
/* Online self-test: read the adapter status register and report the
 * link state through *data (per the kernel-doc block above).
 */
6096 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6098 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6101 val64 = readq(&bar0->adapter_status);
/* LINK_IS_UP() decodes the link bits of adapter_status */
6102 if(!(LINK_IS_UP(val64)))
6111 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6112 * @sp - private member of the device structure, which is a pointer to the
6113 * s2io_nic structure.
6114 * @data - variable that returns the result of each of the test
6115 * conducted by the driver.
6117 * This is one of the offline test that tests the read and write
6118 * access to the RldRam chip on the NIC.
6123 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
/* Offline self-test of the RLDRAM chip: write known patterns through
 * the mc_rldram_test_* registers, read them back, and check the PASS
 * bit.  Two iterations are run; the second inverts the upper 48 bits
 * of each pattern.
 */
6125 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6127 int cnt, iteration = 0, test_fail = 0;
/* Disable ECC so test patterns are not corrected behind our back */
6129 val64 = readq(&bar0->adapter_control);
6130 val64 &= ~ADAPTER_ECC_EN;
6131 writeq(val64, &bar0->adapter_control);
/* Put the memory controller into RLDRAM test mode */
6133 val64 = readq(&bar0->mc_rldram_test_ctrl);
6134 val64 |= MC_RLDRAM_TEST_MODE;
6135 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6137 val64 = readq(&bar0->mc_rldram_mrs);
6138 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6139 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6141 val64 |= MC_RLDRAM_MRS_ENABLE;
6142 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6144 while (iteration < 2) {
/* Load the three data words; iteration 1 uses the inverted patterns */
6145 val64 = 0x55555555aaaa0000ULL;
6146 if (iteration == 1) {
6147 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6149 writeq(val64, &bar0->mc_rldram_test_d0);
6151 val64 = 0xaaaa5a5555550000ULL;
6152 if (iteration == 1) {
6153 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6155 writeq(val64, &bar0->mc_rldram_test_d1);
6157 val64 = 0x55aaaaaaaa5a0000ULL;
6158 if (iteration == 1) {
6159 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6161 writeq(val64, &bar0->mc_rldram_test_d2);
/* Target address within RLDRAM for the test transaction */
6163 val64 = (u64) (0x0000003ffffe0100ULL);
6164 writeq(val64, &bar0->mc_rldram_test_add);
/* Kick off the write phase and poll (5 tries) for TEST_DONE */
6166 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
6168 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6170 for (cnt = 0; cnt < 5; cnt++) {
6171 val64 = readq(&bar0->mc_rldram_test_ctrl);
6172 if (val64 & MC_RLDRAM_TEST_DONE)
/* Kick off the read-back phase and poll again */
6180 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6181 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6183 for (cnt = 0; cnt < 5; cnt++) {
6184 val64 = readq(&bar0->mc_rldram_test_ctrl);
6185 if (val64 & MC_RLDRAM_TEST_DONE)
/* Hardware compares read-back data; PASS bit clear means mismatch */
6193 val64 = readq(&bar0->mc_rldram_test_ctrl);
6194 if (!(val64 & MC_RLDRAM_TEST_PASS))
6202 /* Bring the adapter out of test mode */
6203 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6209 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6210 * @sp : private member of the device structure, which is a pointer to the
6211 * s2io_nic structure.
6212 * @ethtest : pointer to a ethtool command specific structure that will be
6213 * returned to the user.
6214 * @data : variable that returns the result of each of the test
6215 * conducted by the driver.
6217 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6218 * the health of the card.
6223 static void s2io_ethtool_test(struct net_device *dev,
6224 struct ethtool_test *ethtest,
/* ethtool self-test entry point: runs the offline tests (register,
 * rldram, eeprom, bist) with the interface brought down, and the
 * online link test.  Each result lands in a fixed data[] slot; any
 * failure sets ETH_TEST_FL_FAILED in ethtest->flags.
 */
6227 struct s2io_nic *sp = dev->priv;
/* Remember whether the interface was up so it can be restored after
 * the offline tests. */
6228 int orig_state = netif_running(sp->dev);
6230 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6231 /* Offline Tests. */
6233 s2io_close(sp->dev);
6235 if (s2io_register_test(sp, &data[0]))
6236 ethtest->flags |= ETH_TEST_FL_FAILED;
6240 if (s2io_rldram_test(sp, &data[3]))
6241 ethtest->flags |= ETH_TEST_FL_FAILED;
6245 if (s2io_eeprom_test(sp, &data[1]))
6246 ethtest->flags |= ETH_TEST_FL_FAILED;
6248 if (s2io_bist_test(sp, &data[4]))
6249 ethtest->flags |= ETH_TEST_FL_FAILED;
6259 "%s: is not up, cannot run test\n",
/* Online test: link state check, result in data[2] */
6268 if (s2io_link_test(sp, &data[2]))
6269 ethtest->flags |= ETH_TEST_FL_FAILED;
6278 static void s2io_get_ethtool_stats(struct net_device *dev,
6279 struct ethtool_stats *estats,
/* ethtool -S handler: refresh the hardware statistics block and copy
 * every counter into tmp_stats[] in the exact order of the
 * ethtool_*_stats_keys string tables.  Many 32-bit hardware counters
 * carry a separate *_oflow word, which is folded into the upper 32
 * bits of the reported 64-bit value.
 */
6283 struct s2io_nic *sp = dev->priv;
6284 struct stat_block *stat_info = sp->mac_control.stats_info;
/* Pull a fresh snapshot of the stats block from the NIC */
6286 s2io_updt_stats(sp);
/* --- Tx MAC counters --- */
6288 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6289 le32_to_cpu(stat_info->tmac_frms);
6291 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6292 le32_to_cpu(stat_info->tmac_data_octets);
6293 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6295 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6296 le32_to_cpu(stat_info->tmac_mcst_frms);
6298 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6299 le32_to_cpu(stat_info->tmac_bcst_frms);
6300 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6302 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6303 le32_to_cpu(stat_info->tmac_ttl_octets);
6305 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6306 le32_to_cpu(stat_info->tmac_ucst_frms);
6308 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6309 le32_to_cpu(stat_info->tmac_nucst_frms);
6311 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6312 le32_to_cpu(stat_info->tmac_any_err_frms);
6313 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6314 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6316 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6317 le32_to_cpu(stat_info->tmac_vld_ip);
6319 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6320 le32_to_cpu(stat_info->tmac_drop_ip);
6322 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6323 le32_to_cpu(stat_info->tmac_icmp);
6325 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6326 le32_to_cpu(stat_info->tmac_rst_tcp);
6327 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6328 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6329 le32_to_cpu(stat_info->tmac_udp);
/* --- Rx MAC counters --- */
6331 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6332 le32_to_cpu(stat_info->rmac_vld_frms);
6334 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6335 le32_to_cpu(stat_info->rmac_data_octets);
6336 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6337 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6339 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6340 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6342 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6343 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6344 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6345 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6346 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6347 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6348 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6350 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6351 le32_to_cpu(stat_info->rmac_ttl_octets);
6353 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6354 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6356 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6357 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6359 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6360 le32_to_cpu(stat_info->rmac_discarded_frms);
6362 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6363 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6364 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6365 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6367 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6368 le32_to_cpu(stat_info->rmac_usized_frms);
6370 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6371 le32_to_cpu(stat_info->rmac_osized_frms);
6373 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6374 le32_to_cpu(stat_info->rmac_frag_frms);
6376 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6377 le32_to_cpu(stat_info->rmac_jabber_frms);
6378 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6379 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6380 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6381 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6382 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6383 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6385 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6386 le32_to_cpu(stat_info->rmac_ip);
6387 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6388 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6390 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6391 le32_to_cpu(stat_info->rmac_drop_ip);
6393 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6394 le32_to_cpu(stat_info->rmac_icmp);
6395 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6397 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6398 le32_to_cpu(stat_info->rmac_udp);
6400 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6401 le32_to_cpu(stat_info->rmac_err_drp_udp);
6402 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
/* Per-queue Rx frame and queue-full counters (queues 0-7) */
6403 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6404 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6405 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6406 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6407 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6408 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6409 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6410 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6411 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6412 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6413 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6414 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6415 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6416 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6417 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6418 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6420 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6421 le32_to_cpu(stat_info->rmac_pause_cnt);
6422 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6423 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6425 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6426 le32_to_cpu(stat_info->rmac_accepted_ip);
6427 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* --- PCI/DMA request and retry counters --- */
6428 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6429 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6430 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6431 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6432 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6433 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6434 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6435 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6436 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6437 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6438 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6439 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6440 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6441 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6442 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6443 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6444 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6445 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6447 /* Enhanced statistics exist only for Hercules */
6448 if(sp->device_type == XFRAME_II_DEVICE) {
6450 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6452 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6454 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6455 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6456 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6457 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6458 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6459 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6460 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6461 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6462 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6463 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6464 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6465 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6466 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6467 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
/* --- Driver-maintained software statistics (host byte order) --- */
6471 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6472 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6473 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6474 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6475 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6476 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6477 for (k = 0; k < MAX_RX_RINGS; k++)
6478 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
/* XPAK transceiver alarm/warning counters */
6479 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6480 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6481 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6482 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6483 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6484 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6485 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6486 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6487 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6488 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6489 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6490 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6491 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6492 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6493 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6494 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
/* Average packets per LRO aggregation = sum / count */
6495 if (stat_info->sw_stat.num_aggregations) {
6496 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6499 * Since 64-bit divide does not work on all platforms,
6500 * do repeated subtraction.
6502 while (tmp >= stat_info->sw_stat.num_aggregations) {
6503 tmp -= stat_info->sw_stat.num_aggregations;
6506 tmp_stats[i++] = count;
6510 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6511 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6512 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6513 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6514 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6515 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6516 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6517 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6518 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6520 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6521 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6522 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6523 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6524 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6526 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6527 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6528 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6529 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6530 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6531 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6532 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6533 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6534 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6535 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6536 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6537 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6538 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6539 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6540 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6541 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6542 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6543 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6544 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6545 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6546 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6547 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6548 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6549 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6550 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6551 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
/* ethtool get_regs_len: the register dump covers the whole BAR0 space. */
6554 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6556 return (XENA_REG_SPACE);
/* ethtool get_rx_csum: report the driver's Rx checksum-offload flag. */
6560 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6562 struct s2io_nic *sp = dev->priv;
6564 return (sp->rx_csum);
/* ethtool set_rx_csum: enable/disable Rx checksum offload per 'data'. */
6567 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6569 struct s2io_nic *sp = dev->priv;
/* ethtool get_eeprom_len: size of the on-board EEPROM exposed to user. */
6579 static int s2io_get_eeprom_len(struct net_device *dev)
6581 return (XENA_EEPROM_SPACE);
/* ethtool get_sset_count: number of strings per string-set.  The stats
 * count depends on the chip: Xframe II (Hercules) exposes extra
 * enhanced counters on top of the Xframe I set.
 */
6584 static int s2io_get_sset_count(struct net_device *dev, int sset)
6586 struct s2io_nic *sp = dev->priv;
6590 return S2IO_TEST_LEN;
6592 switch(sp->device_type) {
6593 case XFRAME_I_DEVICE:
6594 return XFRAME_I_STAT_LEN;
6595 case XFRAME_II_DEVICE:
6596 return XFRAME_II_STAT_LEN;
6605 static void s2io_ethtool_get_strings(struct net_device *dev,
6606 u32 stringset, u8 * data)
/* ethtool get_strings: copy out the name tables for either the
 * self-test set or the statistics set.  For stats, the layout must
 * match s2io_get_ethtool_stats(): Xena keys, then (Xframe II only)
 * the enhanced keys, then the driver software-stat keys.
 */
6609 struct s2io_nic *sp = dev->priv;
6611 switch (stringset) {
6613 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6616 stat_size = sizeof(ethtool_xena_stats_keys);
6617 memcpy(data, ðtool_xena_stats_keys,stat_size);
6618 if(sp->device_type == XFRAME_II_DEVICE) {
6619 memcpy(data + stat_size,
6620 ðtool_enhanced_stats_keys,
6621 sizeof(ethtool_enhanced_stats_keys));
6622 stat_size += sizeof(ethtool_enhanced_stats_keys);
6625 memcpy(data + stat_size, ðtool_driver_stats_keys,
6626 sizeof(ethtool_driver_stats_keys));
/* ethtool set_tx_csum: toggle NETIF_F_IP_CSUM in dev->features. */
6630 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6633 dev->features |= NETIF_F_IP_CSUM;
6635 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool get_tso: report whether TSO is currently enabled. */
6640 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6642 return (dev->features & NETIF_F_TSO) != 0;
/* ethtool set_tso: toggle both IPv4 and IPv6 TSO feature flags together. */
6644 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6647 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6649 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
/* ethtool operations table wired into the net_device; maps each
 * ethtool request onto the s2io_* handlers defined above. */
6654 static const struct ethtool_ops netdev_ethtool_ops = {
6655 .get_settings = s2io_ethtool_gset,
6656 .set_settings = s2io_ethtool_sset,
6657 .get_drvinfo = s2io_ethtool_gdrvinfo,
6658 .get_regs_len = s2io_ethtool_get_regs_len,
6659 .get_regs = s2io_ethtool_gregs,
6660 .get_link = ethtool_op_get_link,
6661 .get_eeprom_len = s2io_get_eeprom_len,
6662 .get_eeprom = s2io_ethtool_geeprom,
6663 .set_eeprom = s2io_ethtool_seeprom,
6664 .get_ringparam = s2io_ethtool_gringparam,
6665 .get_pauseparam = s2io_ethtool_getpause_data,
6666 .set_pauseparam = s2io_ethtool_setpause_data,
6667 .get_rx_csum = s2io_ethtool_get_rx_csum,
6668 .set_rx_csum = s2io_ethtool_set_rx_csum,
6669 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6670 .set_sg = ethtool_op_set_sg,
6671 .get_tso = s2io_ethtool_op_get_tso,
6672 .set_tso = s2io_ethtool_op_set_tso,
6673 .set_ufo = ethtool_op_set_ufo,
6674 .self_test = s2io_ethtool_test,
6675 .get_strings = s2io_ethtool_get_strings,
6676 .phys_id = s2io_ethtool_idnic,
6677 .get_ethtool_stats = s2io_get_ethtool_stats,
6678 .get_sset_count = s2io_get_sset_count,
6682 * s2io_ioctl - Entry point for the Ioctl
6683 * @dev : Device pointer.
6684 * @ifr : An IOCTL specefic structure, that can contain a pointer to
6685 * a proprietary structure used to pass information to the driver.
6686 * @cmd : This is used to distinguish between the different commands that
6687 * can be passed to the IOCTL functions.
6689 * Currently there are no special functionality supported in IOCTL, hence
6690 * function always return EOPNOTSUPPORTED
/* No private ioctls are implemented (see kernel-doc above): the
 * handler unconditionally reports "operation not supported". */
6693 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6699 * s2io_change_mtu - entry point to change MTU size for the device.
6700 * @dev : device pointer.
6701 * @new_mtu : the new MTU size for the device.
6702 * Description: A driver entry point to change MTU size for the device.
6703 * Before changing the MTU the device must be stopped.
6705 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6709 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
/* Validate the requested MTU, then either bounce the interface
 * (stop Tx, bring the card back up, wake Tx) if it is running, or
 * program the new max payload length register directly if it is down.
 */
6711 struct s2io_nic *sp = dev->priv;
/* Reject anything outside [MIN_MTU, S2IO_JUMBO_SIZE] */
6714 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6715 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6721 if (netif_running(dev)) {
6722 s2io_stop_all_tx_queue(sp);
6724 ret = s2io_card_up(sp);
6726 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6730 s2io_wake_all_tx_queue(sp);
6731 } else { /* Device is down */
6732 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6733 u64 val64 = new_mtu;
6735 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6742 * s2io_set_link - Set the LInk status
6743 * @data: long pointer to device private structue
6744 * Description: Sets the link status for the adapter
6747 static void s2io_set_link(struct work_struct *work)
/* Workqueue handler: re-evaluate the adapter's link state and update
 * the netdev link status, adapter enable bit, LED, and (on cards with
 * faulty link indicators) the GPIO-driven link LED.
 */
6749 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6750 struct net_device *dev = nic->dev;
6751 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6757 if (!netif_running(dev))
/* Serialize against card reset via the LINK_TASK state bit */
6760 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6761 /* The card is being reset, no point doing anything */
6765 subid = nic->pdev->subsystem_device;
6766 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6768 * Allow a small delay for the NICs self initiated
6769 * cleanup to complete.
6774 val64 = readq(&bar0->adapter_status);
6775 if (LINK_IS_UP(val64)) {
/* Link up: if the adapter isn't enabled yet, verify quiescence
 * before setting ADAPTER_CNTL_EN and turning the LED on. */
6776 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6777 if (verify_xena_quiescence(nic)) {
6778 val64 = readq(&bar0->adapter_control);
6779 val64 |= ADAPTER_CNTL_EN;
6780 writeq(val64, &bar0->adapter_control);
6781 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6782 nic->device_type, subid)) {
6783 val64 = readq(&bar0->gpio_control);
6784 val64 |= GPIO_CTRL_GPIO_0;
6785 writeq(val64, &bar0->gpio_control);
6786 val64 = readq(&bar0->gpio_control);
6788 val64 |= ADAPTER_LED_ON;
6789 writeq(val64, &bar0->adapter_control);
6791 nic->device_enabled_once = TRUE;
6793 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6794 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6795 s2io_stop_all_tx_queue(nic);
6798 val64 = readq(&bar0->adapter_control);
6799 val64 |= ADAPTER_LED_ON;
6800 writeq(val64, &bar0->adapter_control);
6801 s2io_link(nic, LINK_UP);
/* Link down: clear the GPIO workaround bit and LED, report down */
6803 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6805 val64 = readq(&bar0->gpio_control);
6806 val64 &= ~GPIO_CTRL_GPIO_0;
6807 writeq(val64, &bar0->gpio_control);
6808 val64 = readq(&bar0->gpio_control);
6811 val64 = readq(&bar0->adapter_control);
6812 val64 = val64 &(~ADAPTER_LED_ON);
6813 writeq(val64, &bar0->adapter_control);
6814 s2io_link(nic, LINK_DOWN);
6816 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6822 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6824 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6825 u64 *temp2, int size)
/* Helper for rxd_owner_bit_reset(): (re)populate one Rx descriptor's
 * buffer pointers.  A descriptor whose Host_Control is 0 has no skb
 * attached yet; allocate one and DMA-map it, caching the mapped
 * addresses in *temp0..*temp2 so subsequent empty descriptors can
 * reuse the same mapping (frames won't actually be processed here).
 * On DMA-mapping failure, already-mapped buffers are unmapped and the
 * skb freed via the memalloc_failed path.
 */
6827 struct net_device *dev = sp->dev;
6828 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
/* One-buffer mode: single data buffer per descriptor */
6830 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6831 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6834 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6836 * As Rx frame are not going to be processed,
6837 * using same mapped address for the Rxd
6840 rxdp1->Buffer0_ptr = *temp0;
6842 *skb = dev_alloc_skb(size);
6844 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6845 DBG_PRINT(INFO_DBG, "memory to allocate ");
6846 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6847 sp->mac_control.stats_info->sw_stat. \
6848 mem_alloc_fail_cnt++;
6851 sp->mac_control.stats_info->sw_stat.mem_allocated
6852 += (*skb)->truesize;
6853 /* storing the mapped addr in a temp variable
6854 * such it will be used for next rxd whose
6855 * Host Control is NULL
6857 rxdp1->Buffer0_ptr = *temp0 =
6858 pci_map_single( sp->pdev, (*skb)->data,
6859 size - NET_IP_ALIGN,
6860 PCI_DMA_FROMDEVICE);
6861 if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
6862 goto memalloc_failed;
6863 rxdp->Host_Control = (unsigned long) (*skb);
6865 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6866 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6867 /* Two buffer Mode */
6869 rxdp3->Buffer2_ptr = *temp2;
6870 rxdp3->Buffer0_ptr = *temp0;
6871 rxdp3->Buffer1_ptr = *temp1;
6873 *skb = dev_alloc_skb(size);
6875 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6876 DBG_PRINT(INFO_DBG, "memory to allocate ");
6877 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6878 sp->mac_control.stats_info->sw_stat. \
6879 mem_alloc_fail_cnt++;
6882 sp->mac_control.stats_info->sw_stat.mem_allocated
6883 += (*skb)->truesize;
/* Buffer2 carries the payload (mtu + 4 bytes) */
6884 rxdp3->Buffer2_ptr = *temp2 =
6885 pci_map_single(sp->pdev, (*skb)->data,
6887 PCI_DMA_FROMDEVICE);
6888 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
6889 goto memalloc_failed;
/* Buffer0 carries the Ethernet header area (ba->ba_0) */
6890 rxdp3->Buffer0_ptr = *temp0 =
6891 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6892 PCI_DMA_FROMDEVICE);
6893 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
/* Undo the Buffer2 mapping before failing */
6894 pci_unmap_single (sp->pdev,
6895 (dma_addr_t)rxdp3->Buffer2_ptr,
6896 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6897 goto memalloc_failed;
6899 rxdp->Host_Control = (unsigned long) (*skb);
6901 /* Buffer-1 will be dummy buffer not used */
6902 rxdp3->Buffer1_ptr = *temp1 =
6903 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6904 PCI_DMA_FROMDEVICE);
6905 if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
/* Undo both earlier mappings before failing */
6906 pci_unmap_single (sp->pdev,
6907 (dma_addr_t)rxdp3->Buffer0_ptr,
6908 BUF0_LEN, PCI_DMA_FROMDEVICE);
6909 pci_unmap_single (sp->pdev,
6910 (dma_addr_t)rxdp3->Buffer2_ptr,
6911 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6912 goto memalloc_failed;
/* Common failure path: account and free the freshly allocated skb */
6918 stats->pci_map_fail_cnt++;
6919 stats->mem_freed += (*skb)->truesize;
6920 dev_kfree_skb(*skb);
6924 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
/* Program the per-buffer size fields of an Rx descriptor according to
 * the ring mode: one full-size buffer in mode 1, or header buffer +
 * 1-byte dummy + payload buffer in mode 3B.
 */
6927 struct net_device *dev = sp->dev;
6928 if (sp->rxd_mode == RXD_MODE_1) {
6929 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6930 } else if (sp->rxd_mode == RXD_MODE_3B) {
6931 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6932 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6933 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6937 static int rxd_owner_bit_reset(struct s2io_nic *sp)
/* Walk every Rx descriptor in every ring, make sure each has a valid
 * buffer attached (allocating/mapping where needed via
 * set_rxd_buffer_pointer), program the buffer sizes, and hand
 * ownership back to the hardware.  Called while the card is being
 * brought down so the HW always sees replenished descriptors and the
 * ring doesn't bump.
 */
6939 int i, j, k, blk_cnt = 0, size;
6940 struct mac_info * mac_control = &sp->mac_control;
6941 struct config_param *config = &sp->config;
6942 struct net_device *dev = sp->dev;
6943 struct RxD_t *rxdp = NULL;
6944 struct sk_buff *skb = NULL;
6945 struct buffAdd *ba = NULL;
6946 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6948 /* Calculate the size based on ring mode */
6949 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6950 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6951 if (sp->rxd_mode == RXD_MODE_1)
6952 size += NET_IP_ALIGN;
6953 else if (sp->rxd_mode == RXD_MODE_3B)
6954 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
/* Iterate rings -> blocks -> descriptors */
6956 for (i = 0; i < config->rx_ring_num; i++) {
6957 blk_cnt = config->rx_cfg[i].num_rxd /
6958 (rxd_count[sp->rxd_mode] +1);
6960 for (j = 0; j < blk_cnt; j++) {
6961 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6962 rxdp = mac_control->rings[i].
6963 rx_blocks[j].rxds[k].virt_addr;
6964 if(sp->rxd_mode == RXD_MODE_3B)
6965 ba = &mac_control->rings[i].ba[j][k];
6966 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6967 &skb,(u64 *)&temp0_64,
6974 set_rxd_buffer_size(sp, rxdp, size);
6976 /* flip the Ownership bit to Hardware */
6977 rxdp->Control_1 |= RXD_OWN_XENA;
6985 static int s2io_add_isr(struct s2io_nic * sp)
/* Register the driver's interrupt handler(s).  Tries MSI-X first when
 * configured (one vector per ring/FIFO entry, alarms on a shared
 * vector), falling back to legacy INTA if MSI-X enabling or any
 * per-vector request_irq() fails.
 */
6988 struct net_device *dev = sp->dev;
6991 if (sp->config.intr_type == MSI_X)
6992 ret = s2io_enable_msi_x(sp);
6994 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6995 sp->config.intr_type = INTA;
6998 /* Store the values of the MSIX table in the struct s2io_nic structure */
6999 store_xmsi_data(sp);
7001 /* After proper initialization of H/W, register ISR */
7002 if (sp->config.intr_type == MSI_X) {
7003 int i, msix_rx_cnt = 0;
7005 for (i = 0; i < sp->num_entries; i++) {
7006 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
/* Ring vectors get the Rx handler, FIFO vectors the Tx handler */
7007 if (sp->s2io_entries[i].type ==
7009 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7011 err = request_irq(sp->entries[i].vector,
7012 s2io_msix_ring_handle, 0,
7014 sp->s2io_entries[i].arg);
7015 } else if (sp->s2io_entries[i].type ==
7017 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7019 err = request_irq(sp->entries[i].vector,
7020 s2io_msix_fifo_handle, 0,
7022 sp->s2io_entries[i].arg);
7025 /* if either data or addr is zero print it. */
7026 if (!(sp->msix_info[i].addr &&
7027 sp->msix_info[i].data)) {
7029 "%s @Addr:0x%llx Data:0x%llx\n",
7031 (unsigned long long)
7032 sp->msix_info[i].addr,
7033 (unsigned long long)
7034 ntohl(sp->msix_info[i].data));
/* A failed request_irq: tear down MSI-X and fall back to INTA */
7038 remove_msix_isr(sp);
7041 "%s:MSI-X-%d registration "
7042 "failed\n", dev->name, i);
7045 "%s: Defaulting to INTA\n",
7047 sp->config.intr_type = INTA;
7050 sp->s2io_entries[i].in_use =
7051 MSIX_REGISTERED_SUCCESS;
7055 printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
7057 DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
7058 " through alarm vector\n");
/* Legacy/fallback path: single shared INTA line */
7061 if (sp->config.intr_type == INTA) {
7062 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
7065 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7072 static void s2io_rem_isr(struct s2io_nic * sp)
7074 if (sp->config.intr_type == MSI_X)
7075 remove_msix_isr(sp);
7077 remove_inta_isr(sp);
/*
 * do_s2io_card_down - Quiesce and shut down the adapter.
 * @sp:    device private structure.
 * @do_io: when non-zero, actually touch the hardware registers (quiescence
 *         polling / reset); the reset paths that must not do I/O pass 0.
 *
 * Stops the alarm timer, serializes against the link-state task via the
 * __S2IO_STATE_LINK_TASK bit, disables NAPI (per-ring for MSI-X, single
 * context otherwise), waits for the adapter to go quiescent, then frees all
 * Tx/Rx buffers.
 *
 * NOTE(review): lossy extract — the stop_nic()/s2io_rem_isr() section and
 * several braces are missing below; text kept byte-identical.
 */
7080 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7083 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7084 register u64 val64 = 0;
7085 struct config_param *config;
7086 config = &sp->config;
/* Nothing to do if the card is already down */
7088 if (!is_s2io_card_up(sp))
7091 del_timer_sync(&sp->alarm_timer);
7092 /* If s2io_set_link task is executing, wait till it completes. */
7093 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
7096 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
/* Disable NAPI: one context per Rx ring under MSI-X, one global otherwise */
7099 if (sp->config.napi) {
7101 if (config->intr_type == MSI_X) {
7102 for (; off < sp->config.rx_ring_num; off++)
7103 napi_disable(&sp->mac_control.rings[off].napi);
7106 napi_disable(&sp->napi);
7109 /* disable Tx and Rx traffic on the NIC */
7115 /* Check if the device is Quiescent and then Reset the NIC */
7117 /* As per the HW requirement we need to replenish the
7118 * receive buffer to avoid the ring bump. Since there is
7119 * no intention of processing the Rx frame at this point we are
7120 * just setting the ownership bit of rxd in Each Rx
7121 * ring to HW and set the appropriate buffer size
7122 * based on the ring mode
7124 rxd_owner_bit_reset(sp);
7126 val64 = readq(&bar0->adapter_status);
7127 if (verify_xena_quiescence(sp)) {
7128 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
/* Quiescence never reached: report the raw adapter status and give up */
7136 "s2io_close:Device not Quiescent ");
7137 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
7138 (unsigned long long) val64);
7145 /* Free all Tx buffers */
7146 free_tx_buffers(sp);
7148 /* Free all Rx buffers */
7149 free_rx_buffers(sp);
/* Allow the link-state task to run again */
7151 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
/*
 * s2io_card_down - Bring the NIC fully down, including register I/O.
 * @sp: device private structure.
 *
 * Thin wrapper around do_s2io_card_down() with do_io == 1 so the hardware
 * is actually quiesced and reset (reset-recovery paths pass 0 instead).
 * (Reconstructed: only the braces were dropped by the lossy extraction.)
 */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
/*
 * s2io_card_up - Bring the adapter up: H/W init, Rx buffers, NAPI,
 * receive mode, LRO limits, NIC start, ISR registration, alarm timer and
 * interrupt enables.  Any failure unwinds the Rx buffers already filled.
 *
 * NOTE(review): lossy extract — init_nic() call, error `return` paths and
 * several braces are missing below; text kept byte-identical.
 */
7159 static int s2io_card_up(struct s2io_nic * sp)
7162 struct mac_info *mac_control;
7163 struct config_param *config;
7164 struct net_device *dev = (struct net_device *) sp->dev;
7167 /* Initialize the H/W I/O registers */
7170 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7178 * Initializing the Rx buffers. For now we are considering only 1
7179 * Rx ring and initializing buffers into 30 Rx blocks
7181 mac_control = &sp->mac_control;
7182 config = &sp->config;
/* Fill every configured Rx ring; unwind all rings on allocation failure */
7184 for (i = 0; i < config->rx_ring_num; i++) {
7185 mac_control->rings[i].mtu = dev->mtu;
7186 ret = fill_rx_buffers(&mac_control->rings[i]);
7188 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7191 free_rx_buffers(sp);
7194 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7195 mac_control->rings[i].rx_bufs_left);
7198 /* Initialise napi */
7201 if (config->intr_type == MSI_X) {
7202 for (i = 0; i < sp->config.rx_ring_num; i++)
7203 napi_enable(&sp->mac_control.rings[i].napi);
7205 napi_enable(&sp->napi);
7209 /* Maintain the state prior to the open */
7210 if (sp->promisc_flg)
7211 sp->promisc_flg = 0;
7212 if (sp->m_cast_flg) {
7214 sp->all_multi_pos= 0;
7217 /* Setting its receive mode */
7218 s2io_set_multicast(dev);
7221 /* Initialize max aggregatable pkts per session based on MTU */
7222 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7223 /* Check if we can use(if specified) user provided value */
7224 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7225 sp->lro_max_aggr_per_sess = lro_max_pkts;
7228 /* Enable Rx Traffic and interrupts on the NIC */
7229 if (start_nic(sp)) {
7230 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7232 free_rx_buffers(sp);
7236 /* Add interrupt service routine */
7237 if (s2io_add_isr(sp) != 0) {
7238 if (sp->config.intr_type == MSI_X)
7241 free_rx_buffers(sp);
/* Arm the periodic alarm handler (fires every HZ/2) */
7245 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7247 /* Enable select interrupts */
7248 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7249 if (sp->config.intr_type != INTA)
7250 en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
7252 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7253 interruptible |= TX_PIC_INTR;
7254 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
/* Mark the card up so the data path and watchdog may run */
7257 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7262 * s2io_restart_nic - Resets the NIC.
7263 * @data : long pointer to the device private structure
7265 * This function is scheduled to be run by the s2io_tx_watchdog
7266 * function after 0.5 secs to reset the NIC. The idea is to reduce
7267 * the run time of the watch dog routine which is run holding a
7271 static void s2io_restart_nic(struct work_struct *work)
7273 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7274 struct net_device *dev = sp->dev;
7278 if (!netif_running(dev))
7282 if (s2io_card_up(sp)) {
7283 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7286 s2io_wake_all_tx_queue(sp);
7287 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7294 * s2io_tx_watchdog - Watchdog for transmit side.
7295 * @dev : Pointer to net device structure
7297 * This function is triggered if the Tx Queue is stopped
7298 * for a pre-defined amount of time when the Interface is still up.
7299 * If the Interface is jammed in such a situation, the hardware is
7300 * reset (by s2io_close) and restarted again (by s2io_open) to
7301 * overcome any problem that might have been caused in the hardware.
7306 static void s2io_tx_watchdog(struct net_device *dev)
7308 struct s2io_nic *sp = dev->priv;
7310 if (netif_carrier_ok(dev)) {
7311 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7312 schedule_work(&sp->rst_timer_task);
7313 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7318 * rx_osm_handler - To perform some OS related operations on SKB.
7319 * @sp: private member of the device structure,pointer to s2io_nic structure.
7320 * @skb : the socket buffer pointer.
7321 * @len : length of the packet
7322 * @cksum : FCS checksum of the frame.
7323 * @ring_no : the ring from which this RxD was extracted.
7325 * This function is called by the Rx interrupt service routine to perform
7326 * some OS related operations on the SKB before passing it to the upper
7327 * layers. It mainly checks if the checksum is OK, if so adds it to the
7328 * SKBs cksum variable, increments the Rx packet count and passes the SKB
7329 * to the upper layer. If the checksum is wrong, it increments the Rx
7330 * packet error count, frees the SKB and returns error.
7332 * SUCCESS on success and -1 on failure.
/*
 * rx_osm_handler - see the kernel-doc block above.  Extracts the skb from
 * the RxD's Host_Control, accounts per-transfer-code error statistics,
 * builds the skb according to the ring's buffer mode (1-buffer vs 3B),
 * verifies the HW L3/L4 checksum flags and either hands the frame to the
 * LRO machinery or queues it to the stack directly.
 *
 * NOTE(review): lossy extract — the switch headers, error-code case labels
 * and many braces are missing below; text kept byte-identical.
 */
7334 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7336 struct s2io_nic *sp = ring_data->nic;
7337 struct net_device *dev = (struct net_device *) ring_data->dev;
7338 struct sk_buff *skb = (struct sk_buff *)
7339 ((unsigned long) rxdp->Host_Control);
7340 int ring_no = ring_data->ring_no;
7341 u16 l3_csum, l4_csum;
7342 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7349 /* Check for parity error */
7351 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
/* Transfer code lives in bits 48+ of Control_1; bucket per error type */
7353 err_mask = err >> 48;
7356 sp->mac_control.stats_info->sw_stat.
7357 rx_parity_err_cnt++;
7361 sp->mac_control.stats_info->sw_stat.
7366 sp->mac_control.stats_info->sw_stat.
7367 rx_parity_abort_cnt++;
7371 sp->mac_control.stats_info->sw_stat.
7376 sp->mac_control.stats_info->sw_stat.
7381 sp->mac_control.stats_info->sw_stat.
7386 sp->mac_control.stats_info->sw_stat.
7387 rx_buf_size_err_cnt++;
7391 sp->mac_control.stats_info->sw_stat.
7392 rx_rxd_corrupt_cnt++;
7396 sp->mac_control.stats_info->sw_stat.
7401 * Drop the packet if bad transfer code. Exception being
7402 * 0x5, which could be due to unsupported IPv6 extension header.
7403 * In this case, we let stack handle the packet.
7404 * Note that in this case, since checksum will be incorrect,
7405 * stack will validate the same.
7407 if (err_mask != 0x5) {
7408 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7409 dev->name, err_mask);
7410 sp->stats.rx_crc_errors++;
7411 sp->mac_control.stats_info->sw_stat.mem_freed
7414 ring_data->rx_bufs_left -= 1;
7415 rxdp->Host_Control = 0;
7420 /* Updating statistics */
7421 ring_data->rx_packets++;
7422 rxdp->Host_Control = 0;
/* Build the skb according to the ring's receive buffer mode */
7423 if (sp->rxd_mode == RXD_MODE_1) {
7424 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7426 ring_data->rx_bytes += len;
7429 } else if (sp->rxd_mode == RXD_MODE_3B) {
7430 int get_block = ring_data->rx_curr_get_info.block_index;
7431 int get_off = ring_data->rx_curr_get_info.offset;
7432 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7433 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7434 unsigned char *buff = skb_push(skb, buf0_len);
7436 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7437 ring_data->rx_bytes += buf0_len + buf2_len;
/* Header (buffer 0) is copied in front of the payload (buffer 2) */
7438 memcpy(buff, ba->ba_0, buf0_len);
7439 skb_put(skb, buf2_len);
/* Only non-fragmented TCP/UDP frames are checksum/LRO candidates */
7442 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
7443 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7445 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7446 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7447 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7449 * NIC verifies if the Checksum of the received
7450 * frame is Ok or not and accordingly returns
7451 * a flag in the RxD.
7453 skb->ip_summed = CHECKSUM_UNNECESSARY;
7454 if (ring_data->lro) {
/* LRO dispatch: the return code selects the aggregation action */
7459 ret = s2io_club_tcp_session(ring_data,
7460 skb->data, &tcp, &tcp_len, &lro,
7463 case 3: /* Begin anew */
7466 case 1: /* Aggregate */
7468 lro_append_pkt(sp, lro,
7472 case 4: /* Flush session */
7474 lro_append_pkt(sp, lro,
7476 queue_rx_frame(lro->parent,
7478 clear_lro_session(lro);
7479 sp->mac_control.stats_info->
7480 sw_stat.flush_max_pkts++;
7483 case 2: /* Flush both */
7484 lro->parent->data_len =
7486 sp->mac_control.stats_info->
7487 sw_stat.sending_both++;
7488 queue_rx_frame(lro->parent,
7490 clear_lro_session(lro);
7492 case 0: /* sessions exceeded */
7493 case -1: /* non-TCP or not
7497 * First pkt in session not
7498 * L3/L4 aggregatable
7503 "%s: Samadhana!!\n",
7510 * Packet with erroneous checksum, let the
7511 * upper layers deal with it.
7513 skb->ip_summed = CHECKSUM_NONE;
7516 skb->ip_summed = CHECKSUM_NONE;
7518 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7520 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7521 dev->last_rx = jiffies;
7523 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7528 * s2io_link - stops/starts the Tx queue.
7529 * @sp : private member of the device structure, which is a pointer to the
7530 * s2io_nic structure.
7531 * @link : indicates whether link is UP/DOWN.
7533 * This function stops/starts the Tx queue depending on whether the link
7534 * status of the NIC is down or up. This is called by the Alarm
7535 * interrupt handler whenever a link change interrupt comes up.
7540 static void s2io_link(struct s2io_nic * sp, int link)
7542 struct net_device *dev = (struct net_device *) sp->dev;
7544 if (link != sp->last_link_state) {
7546 if (link == LINK_DOWN) {
7547 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7548 s2io_stop_all_tx_queue(sp);
7549 netif_carrier_off(dev);
7550 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7551 sp->mac_control.stats_info->sw_stat.link_up_time =
7552 jiffies - sp->start_time;
7553 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7555 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7556 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7557 sp->mac_control.stats_info->sw_stat.link_down_time =
7558 jiffies - sp->start_time;
7559 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7560 netif_carrier_on(dev);
7561 s2io_wake_all_tx_queue(sp);
7564 sp->last_link_state = link;
7565 sp->start_time = jiffies;
7569 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7570 * @sp : private member of the device structure, which is a pointer to the
7571 * s2io_nic structure.
7573 * This function initializes a few of the PCI and PCI-X configuration registers
7574 * with recommended values.
7579 static void s2io_init_pci(struct s2io_nic * sp)
7581 u16 pci_cmd = 0, pcix_cmd = 0;
7583 /* Enable Data Parity Error Recovery in PCI-X command register. */
7584 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7586 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7588 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7591 /* Set the PErr Response bit in PCI command register. */
7592 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7593 pci_write_config_word(sp->pdev, PCI_COMMAND,
7594 (pci_cmd | PCI_COMMAND_PARITY));
7595 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/*
 * s2io_verify_parm - Sanity-check the module load parameters, clamping
 * out-of-range values to safe defaults (fifo/ring counts, steering type,
 * interrupt type, ring mode) and reporting each correction.  MSI-X is
 * rejected on Xframe I parts (only Herc WIN/UNI device IDs support it).
 *
 * NOTE(review): lossy extract — braces, `else` arms and the final return
 * are missing below; text kept byte-identical.
 */
7598 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7601 if ((tx_fifo_num > MAX_TX_FIFOS) ||
7602 (tx_fifo_num < 1)) {
7603 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7604 "(%d) not supported\n", tx_fifo_num);
7606 if (tx_fifo_num < 1)
7609 tx_fifo_num = MAX_TX_FIFOS;
7611 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7612 DBG_PRINT(ERR_DBG, "tx fifos\n");
7616 *dev_multiq = multiq;
/* Steering is meaningless with a single fifo */
7618 if (tx_steering_type && (1 == tx_fifo_num)) {
7619 if (tx_steering_type != TX_DEFAULT_STEERING)
7621 "s2io: Tx steering is not supported with "
7622 "one fifo. Disabling Tx steering.\n");
7623 tx_steering_type = NO_STEERING;
7626 if ((tx_steering_type < NO_STEERING) ||
7627 (tx_steering_type > TX_DEFAULT_STEERING)) {
7628 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7630 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7631 tx_steering_type = NO_STEERING;
7634 if (rx_ring_num > MAX_RX_RINGS) {
7635 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7637 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7639 rx_ring_num = MAX_RX_RINGS;
7642 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7643 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7644 "Defaulting to INTA\n");
7645 *dev_intr_type = INTA;
/* MSI-X is a Herc (Xframe II) feature only */
7648 if ((*dev_intr_type == MSI_X) &&
7649 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7650 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7651 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7652 "Defaulting to INTA\n");
7653 *dev_intr_type = INTA;
7656 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7657 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7658 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7665 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7666 * or Traffic class respectively.
7667 * @nic: device private variable
7668 * Description: The function configures the receive steering to
7669 * desired receive ring.
7670 * Return Value: SUCCESS on success and
7671 * '-1' on failure (endian settings incorrect).
7673 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7675 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7676 register u64 val64 = 0;
7678 if (ds_codepoint > 63)
7681 val64 = RTS_DS_MEM_DATA(ring);
7682 writeq(val64, &bar0->rts_ds_mem_data);
7684 val64 = RTS_DS_MEM_CTRL_WE |
7685 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7686 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7688 writeq(val64, &bar0->rts_ds_mem_ctrl);
7690 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7691 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7696 * s2io_init_nic - Initialization of the adapter .
7697 * @pdev : structure containing the PCI related information of the device.
7698 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7700 * The function initializes an adapter identified by the pci_dev structure.
7701 * All OS related initialization including memory and device structure and
7702 * initialization of the device private variable is done. Also the swapper
7703 * control register is initialized to enable read and write into the I/O
7704 * registers of the device.
7706 * returns 0 on success and negative on failure.
/*
 * s2io_init_nic - PCI probe routine (see kernel-doc above).  Enables the
 * device, selects the DMA mask, allocates the net_device, fills in the
 * Tx/Rx configuration from module parameters, maps BAR0/BAR1, wires the
 * netdev entry points, reads the factory MAC from CAM, registers the
 * netdev and prints the feature summary.
 *
 * NOTE(review): lossy extract — error `goto`s, `else` arms, braces and
 * several statements are missing below; text kept byte-identical.
 */
7709 static int __devinit
7710 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7712 struct s2io_nic *sp;
7713 struct net_device *dev;
7715 int dma_flag = FALSE;
7716 u32 mac_up, mac_down;
7717 u64 val64 = 0, tmp64 = 0;
7718 struct XENA_dev_config __iomem *bar0 = NULL;
7720 struct mac_info *mac_control;
7721 struct config_param *config;
7723 u8 dev_intr_type = intr_type;
7725 DECLARE_MAC_BUF(mac);
7727 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7731 if ((ret = pci_enable_device(pdev))) {
7733 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer a 64-bit DMA mask, fall back to 32-bit, else fail the probe */
7737 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7738 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7740 if (pci_set_consistent_dma_mask
7741 (pdev, DMA_64BIT_MASK)) {
7743 "Unable to obtain 64bit DMA for \
7744 consistent allocations\n");
7745 pci_disable_device(pdev);
7748 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7749 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7751 pci_disable_device(pdev);
7754 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7755 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7756 pci_disable_device(pdev);
/* Multiqueue-aware allocation when supported, plain etherdev otherwise */
7760 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7762 dev = alloc_etherdev(sizeof(struct s2io_nic));
7764 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7765 pci_disable_device(pdev);
7766 pci_release_regions(pdev);
7770 pci_set_master(pdev);
7771 pci_set_drvdata(pdev, dev);
7772 SET_NETDEV_DEV(dev, &pdev->dev);
7774 /* Private member variable initialized to s2io NIC structure */
7776 memset(sp, 0, sizeof(struct s2io_nic));
7779 sp->high_dma_flag = dma_flag;
7780 sp->device_enabled_once = FALSE;
7781 if (rx_ring_mode == 1)
7782 sp->rxd_mode = RXD_MODE_1;
7783 if (rx_ring_mode == 2)
7784 sp->rxd_mode = RXD_MODE_3B;
7786 sp->config.intr_type = dev_intr_type;
/* Herc (Xframe II) vs Xena (Xframe I) is decided by the PCI device ID */
7788 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7789 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7790 sp->device_type = XFRAME_II_DEVICE;
7792 sp->device_type = XFRAME_I_DEVICE;
7794 sp->lro = lro_enable;
7796 /* Initialize some PCI/PCI-X fields of the NIC. */
7800 * Setting the device configuration parameters.
7801 * Most of these parameters can be specified by the user during
7802 * module insertion as they are module loadable parameters. If
7803 * these parameters are not specified during load time, they
7804 * are initialized with default values.
7806 mac_control = &sp->mac_control;
7807 config = &sp->config;
7809 config->napi = napi;
7810 config->tx_steering_type = tx_steering_type;
7812 /* Tx side parameters. */
7813 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7814 config->tx_fifo_num = MAX_TX_FIFOS;
7816 config->tx_fifo_num = tx_fifo_num;
7818 /* Initialize the fifos used for tx steering */
7819 if (config->tx_fifo_num < 5) {
7820 if (config->tx_fifo_num == 1)
7821 sp->total_tcp_fifos = 1;
7823 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7824 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7825 sp->total_udp_fifos = 1;
7826 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7828 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7829 FIFO_OTHER_MAX_NUM);
7830 sp->udp_fifo_idx = sp->total_tcp_fifos;
7831 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7832 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7835 config->multiq = dev_multiq;
7836 for (i = 0; i < config->tx_fifo_num; i++) {
7837 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7838 config->tx_cfg[i].fifo_priority = i;
7841 /* mapping the QoS priority to the configured fifos */
7842 for (i = 0; i < MAX_TX_FIFOS; i++)
7843 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7845 /* map the hashing selector table to the configured fifos */
7846 for (i = 0; i < config->tx_fifo_num; i++)
7847 sp->fifo_selector[i] = fifo_selector[i];
7850 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7851 for (i = 0; i < config->tx_fifo_num; i++) {
7852 config->tx_cfg[i].f_no_snoop =
7853 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
/* Short fifos cannot use utilization-based interrupts */
7854 if (config->tx_cfg[i].fifo_len < 65) {
7855 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7859 /* + 2 because one Txd for skb->data and one Txd for UFO */
7860 config->max_txds = MAX_SKB_FRAGS + 2;
7862 /* Rx side parameters. */
7863 config->rx_ring_num = rx_ring_num;
7864 for (i = 0; i < config->rx_ring_num; i++) {
7865 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7866 (rxd_count[sp->rxd_mode] + 1);
7867 config->rx_cfg[i].ring_priority = i;
7868 mac_control->rings[i].rx_bufs_left = 0;
7869 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7870 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7871 mac_control->rings[i].pdev = sp->pdev;
7872 mac_control->rings[i].dev = sp->dev;
7875 for (i = 0; i < rx_ring_num; i++) {
7876 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7877 config->rx_cfg[i].f_no_snoop =
7878 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7881 /* Setting Mac Control parameters */
7882 mac_control->rmac_pause_time = rmac_pause_time;
7883 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7884 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7887 /* initialize the shared memory used by the NIC and the host */
7888 if (init_shared_mem(sp)) {
7889 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7892 goto mem_alloc_failed;
/* BAR0 = device registers, BAR2 = Tx FIFO doorbell space */
7895 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7896 pci_resource_len(pdev, 0));
7898 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7901 goto bar0_remap_failed;
7904 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7905 pci_resource_len(pdev, 2));
7907 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7910 goto bar1_remap_failed;
7913 dev->irq = pdev->irq;
7914 dev->base_addr = (unsigned long) sp->bar0;
7916 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7917 for (j = 0; j < MAX_TX_FIFOS; j++) {
7918 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7919 (sp->bar1 + (j * 0x00020000));
7922 /* Driver entry points */
7923 dev->open = &s2io_open;
7924 dev->stop = &s2io_close;
7925 dev->hard_start_xmit = &s2io_xmit;
7926 dev->get_stats = &s2io_get_stats;
7927 dev->set_multicast_list = &s2io_set_multicast;
7928 dev->do_ioctl = &s2io_ioctl;
7929 dev->set_mac_address = &s2io_set_mac_addr;
7930 dev->change_mtu = &s2io_change_mtu;
7931 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7932 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7933 dev->vlan_rx_register = s2io_vlan_rx_register;
7934 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7937 * will use eth_mac_addr() for dev->set_mac_address
7938 * mac address will be set every time dev->open() is called
7940 #ifdef CONFIG_NET_POLL_CONTROLLER
7941 dev->poll_controller = s2io_netpoll;
7944 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7945 if (sp->high_dma_flag == TRUE)
7946 dev->features |= NETIF_F_HIGHDMA;
7947 dev->features |= NETIF_F_TSO;
7948 dev->features |= NETIF_F_TSO6;
/* UFO is a Herc-only feature, gated by the `ufo` module parameter */
7949 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7950 dev->features |= NETIF_F_UFO;
7951 dev->features |= NETIF_F_HW_CSUM;
7954 dev->features |= NETIF_F_MULTI_QUEUE;
7955 dev->tx_timeout = &s2io_tx_watchdog;
7956 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7957 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7958 INIT_WORK(&sp->set_link_task, s2io_set_link);
7960 pci_save_state(sp->pdev);
7962 /* Setting swapper control on the NIC, for proper reset operation */
7963 if (s2io_set_swapper(sp)) {
7964 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7967 goto set_swap_failed;
7970 /* Verify if the Herc works on the slot its placed into */
7971 if (sp->device_type & XFRAME_II_DEVICE) {
7972 mode = s2io_verify_pci_mode(sp);
7974 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7975 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7977 goto set_swap_failed;
/* Probe-time MSI-X smoke test; real registration happens in add_isr() */
7981 if (sp->config.intr_type == MSI_X) {
7982 sp->num_entries = config->rx_ring_num + 1;
7983 ret = s2io_enable_msi_x(sp);
7986 ret = s2io_test_msi(sp);
7987 /* rollback MSI-X, will re-enable during add_isr() */
7988 remove_msix_isr(sp);
7993 "%s: MSI-X requested but failed to enable\n",
7995 sp->config.intr_type = INTA;
7999 if (config->intr_type == MSI_X) {
8000 for (i = 0; i < config->rx_ring_num ; i++)
8001 netif_napi_add(dev, &mac_control->rings[i].napi,
8002 s2io_poll_msix, 64);
8004 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8007 /* Not needed for Herc */
8008 if (sp->device_type & XFRAME_I_DEVICE) {
8010 * Fix for all "FFs" MAC address problems observed on
8013 fix_mac_address(sp);
8018 * MAC address initialization.
8019 * For now only one mac address will be read and used.
8022 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8023 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8024 writeq(val64, &bar0->rmac_addr_cmd_mem);
8025 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8026 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8027 tmp64 = readq(&bar0->rmac_addr_data0_mem);
8028 mac_down = (u32) tmp64;
8029 mac_up = (u32) (tmp64 >> 32);
8031 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8032 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8033 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8034 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8035 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8036 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8038 /* Set the factory defined MAC address initially */
8039 dev->addr_len = ETH_ALEN;
8040 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8041 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8043 /* initialize number of multicast & unicast MAC entries variables */
8044 if (sp->device_type == XFRAME_I_DEVICE) {
8045 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8046 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8047 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8048 } else if (sp->device_type == XFRAME_II_DEVICE) {
8049 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8050 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8051 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8054 /* store mac addresses from CAM to s2io_nic structure */
8055 do_s2io_store_unicast_mc(sp);
8057 /* Configure MSIX vector for number of rings configured plus one */
8058 if ((sp->device_type == XFRAME_II_DEVICE) &&
8059 (config->intr_type == MSI_X))
8060 sp->num_entries = config->rx_ring_num + 1;
8062 /* Store the values of the MSIX table in the s2io_nic structure */
8063 store_xmsi_data(sp);
8064 /* reset Nic and bring it to known state */
8068 * Initialize link state flags
8069 * and the card state parameter
8073 /* Initialize spinlocks */
8074 for (i = 0; i < sp->config.tx_fifo_num; i++)
8075 spin_lock_init(&mac_control->fifos[i].tx_lock);
8078 * SXE-002: Configure link and activity LED to init state
8081 subid = sp->pdev->subsystem_device;
8082 if ((subid & 0xFF) >= 0x07) {
8083 val64 = readq(&bar0->gpio_control);
8084 val64 |= 0x0000800000000000ULL;
8085 writeq(val64, &bar0->gpio_control);
8086 val64 = 0x0411040400000000ULL;
8087 writeq(val64, (void __iomem *) bar0 + 0x2700);
8088 val64 = readq(&bar0->gpio_control);
8091 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8093 if (register_netdev(dev)) {
8094 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8096 goto register_failed;
/* Probe summary: adapter identity and enabled features */
8099 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8100 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8101 sp->product_name, pdev->revision);
8102 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8103 s2io_driver_version);
8104 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8105 dev->name, print_mac(mac, dev->dev_addr));
8106 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8107 if (sp->device_type & XFRAME_II_DEVICE) {
8108 mode = s2io_print_pci_mode(sp);
8110 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8112 unregister_netdev(dev);
8113 goto set_swap_failed;
8116 switch(sp->rxd_mode) {
8118 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8122 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8127 switch (sp->config.napi) {
8129 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8132 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8136 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8137 sp->config.tx_fifo_num);
8139 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8140 sp->config.rx_ring_num);
8142 switch(sp->config.intr_type) {
8144 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8147 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8150 if (sp->config.multiq) {
8151 for (i = 0; i < sp->config.tx_fifo_num; i++)
8152 mac_control->fifos[i].multiq = config->multiq;
8153 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8156 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8159 switch (sp->config.tx_steering_type) {
8161 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8162 " transmit\n", dev->name);
8164 case TX_PRIORITY_STEERING:
8165 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8166 " transmit\n", dev->name);
8168 case TX_DEFAULT_STEERING:
8169 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8170 " transmit\n", dev->name);
8174 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8177 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8178 " enabled\n", dev->name);
8179 /* Initialize device name */
8180 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8183 * Make Link state as off at this point, when the Link change
8184 * interrupt comes the state will be automatically changed to
8187 netif_carrier_off(dev);
/* Error unwind labels follow (partially missing in this extract) */
8198 free_shared_mem(sp);
8199 pci_disable_device(pdev);
8200 pci_release_regions(pdev);
8201 pci_set_drvdata(pdev, NULL);
8208 * s2io_rem_nic - Free the PCI device
8209 * @pdev: structure containing the PCI related information of the device.
8210 * Description: This function is called by the Pci subsystem to release a
8211 * PCI device and free up all resource held up by the device. This could
8212 * be in response to a Hot plug event or when the driver is to be removed
8216 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8218 struct net_device *dev =
8219 (struct net_device *) pci_get_drvdata(pdev);
8220 struct s2io_nic *sp;
8223 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8227 flush_scheduled_work();
8230 unregister_netdev(dev);
8232 free_shared_mem(sp);
8235 pci_release_regions(pdev);
8236 pci_set_drvdata(pdev, NULL);
8238 pci_disable_device(pdev);
8242 * s2io_starter - Entry point for the driver
8243 * Description: This function is the entry point for the driver. It verifies
8244 * the module loadable parameters and initializes PCI configuration space.
8247 static int __init s2io_starter(void)
8249 return pci_register_driver(&s2io_driver);
8253 * s2io_closer - Cleanup routine for the driver
8254 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
8257 static __exit void s2io_closer(void)
8259 pci_unregister_driver(&s2io_driver);
8260 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Hook the entry/cleanup routines into the module loader. */
8263 module_init(s2io_starter);
8264 module_exit(s2io_closer);
8266 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8267 struct tcphdr **tcp, struct RxD_t *rxdp,
8268 struct s2io_nic *sp)
8271 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8273 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8274 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8279 /* Checking for DIX type or DIX type with VLAN */
8281 || (l2_type == 4)) {
8282 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8284 * If vlan stripping is disabled and the frame is VLAN tagged,
8285 * shift the offset by the VLAN header size bytes.
8287 if ((!vlan_strip_flag) &&
8288 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8289 ip_off += HEADER_VLAN_SIZE;
8291 /* LLC, SNAP etc are considered non-mergeable */
8295 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8296 ip_len = (u8)((*ip)->ihl);
8298 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
/*
 * check_for_socket_match - compare an in-flight LRO session against a new
 * packet's 4-tuple (src/dst address, src/dst port). Returns non-zero
 * (mismatch) when any element differs, 0 when the packet belongs to @lro.
 */
8303 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8306 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Ports are compared in network byte order on both sides — no conversion needed. */
8307 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8308 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
/*
 * get_l4_pyld_length - TCP payload length in bytes:
 * IP total length minus IP header (ihl words * 4) minus TCP header
 * (doff words * 4).
 */
8313 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8315 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
/*
 * initiate_new_session - seed a free LRO slot from the first packet of a
 * flow: record L2/L3/L4 header pointers, the expected next sequence
 * number, current ack/window, total length, VLAN tag, and — when the
 * packet carries exactly a TCP timestamp option — the tsval/tsecr pair.
 */
8318 static void initiate_new_session(struct lro *lro, u8 *l2h,
8319 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8321 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Next expected in-order sequence = this packet's seq + its payload. */
8325 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8326 lro->tcp_ack = tcp->ack_seq;
8328 lro->total_len = ntohs(ip->tot_len);
8330 lro->vlan_tag = vlan_tag;
8332 * check if we saw TCP timestamp. Other consistency checks have
8333 * already been done.
/* doff == 8 words: 20-byte base header + 12 bytes of options, i.e. the
 * NOP/NOP/timestamp layout admitted by verify_l3_l4_lro_capable(). */
8335 if (tcp->doff == 8) {
8337 ptr = (__be32 *)(tcp+1);
/* ptr+1 = tsval (host order for monotonicity checks), ptr+2 = tsecr
 * (kept in network order — echoed back verbatim later). */
8339 lro->cur_tsval = ntohl(*(ptr+1));
8340 lro->cur_tsecr = *(ptr+2);
/*
 * update_L3L4_header - patch the headers of the first (parent) packet of
 * an LRO session just before handing the aggregate to the stack:
 * new IP total length + recomputed IP checksum, latest ack/window, and
 * the latest tsecr when timestamps are in use. Also bumps the driver's
 * aggregation statistics.
 */
8345 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8347 struct iphdr *ip = lro->iph;
8348 struct tcphdr *tcp = lro->tcph;
8350 struct stat_block *statinfo = sp->mac_control.stats_info;
8351 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8353 /* Update L3 header */
/* total_len accumulated across every merged segment. */
8354 ip->tot_len = htons(lro->total_len);
/* IP checksum must be recomputed after tot_len changed. */
8356 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8359 /* Update L4 header */
8360 tcp->ack_seq = lro->tcp_ack;
8361 tcp->window = lro->window;
8363 /* Update tsecr field if this session has timestamps enabled */
8365 __be32 *ptr = (__be32 *)(tcp + 1);
/* cur_tsecr was stored in network order, so write it back as-is. */
8366 *(ptr+2) = lro->cur_tsecr;
8369 /* Update counters required for calculation of
8370 * average no. of packets aggregated.
8372 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8373 statinfo->sw_stat.num_aggregations++;
/*
 * aggregate_new_rx - fold one more in-order segment into an existing LRO
 * session: extend the running lengths, advance the expected sequence
 * number, and refresh ack/window (and timestamps, when present) from
 * the newest packet.
 */
8376 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8377 struct tcphdr *tcp, u32 l4_pyld)
8379 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8380 lro->total_len += l4_pyld;
8381 lro->frags_len += l4_pyld;
/* Next segment must start exactly here to remain mergeable. */
8382 lro->tcp_next_seq += l4_pyld;
8385 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8386 lro->tcp_ack = tcp->ack_seq;
8387 lro->window = tcp->window;
8391 /* Update tsecr and tsval from this packet */
8392 ptr = (__be32 *)(tcp+1);
/* tsval kept in host order for comparisons; tsecr kept in wire order. */
8393 lro->cur_tsval = ntohl(*(ptr+1));
8394 lro->cur_tsecr = *(ptr + 2);
/*
 * verify_l3_l4_lro_capable - L3/L4-level screen for merging a segment.
 * Rejects: empty payloads (runts/pure acks), IP options, ECN CE-marked
 * packets, any TCP control flag other than ACK, and any TCP option
 * layout other than "no options" (doff==5) or exactly NOP/NOP/timestamp
 * (doff==8). When @l_lro is an existing session, additionally enforces
 * PAWS-style monotonic tsval and a non-zero tsecr. Returns 0 when the
 * segment is mergeable.
 */
8398 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8399 struct tcphdr *tcp, u32 tcp_pyld_len)
8403 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8405 if (!tcp_pyld_len) {
8406 /* Runt frame or a pure ack */
8410 if (ip->ihl != 5) /* IP has options */
8413 /* If we see CE codepoint in IP header, packet is not mergeable */
8414 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8417 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8418 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8419 tcp->ece || tcp->cwr || !tcp->ack) {
8421 * Currently recognize only the ack control word and
8422 * any other control field being set would result in
8423 * flushing the LRO session
8429 * Allow only one TCP timestamp option. Don't aggregate if
8430 * any other options are detected.
8432 if (tcp->doff != 5 && tcp->doff != 8)
8435 if (tcp->doff == 8) {
8436 ptr = (u8 *)(tcp + 1);
/* Skip leading NOP padding, then require the timestamp option kind/len. */
8437 while (*ptr == TCPOPT_NOP)
8439 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8442 /* Ensure timestamp value increases monotonically */
8444 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8447 /* timestamp echo reply should be non-zero */
8448 if (*((__be32 *)(ptr+6)) == 0)
/*
 * s2io_club_tcp_session - main LRO dispatch for a received frame.
 * Parses L2/L3/L4, then searches the ring's session table:
 *   ret 1 - segment aggregated into an existing session;
 *   ret 2 - out-of-order or unmergeable: flush session AND this packet;
 *   ret 3 - no matching session: a fresh one was begun;
 *   ret 4 - aggregation limit reached: session finalized for flush;
 *   ret 5 - packet is not LRO-capable, deliver it normally.
 * (Return-code meanings inferred from the visible assignments; several
 * control-flow lines are elided from this view.)
 */
8456 s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8457 u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
8458 struct s2io_nic *sp)
8461 struct tcphdr *tcph;
/* Stage 1: L2 screen; on success ip/tcp point into the frame buffer. */
8465 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8467 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8468 ip->saddr, ip->daddr);
8472 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8473 tcph = (struct tcphdr *)*tcp;
8474 *tcp_len = get_l4_pyld_length(ip, tcph);
/* Stage 2: look for an in-use session matching this 4-tuple. */
8475 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8476 struct lro *l_lro = &ring_data->lro0_n[i];
8477 if (l_lro->in_use) {
8478 if (check_for_socket_match(l_lro, ip, tcph))
8480 /* Sock pair matched */
/* Out-of-order segment terminates the session (ret 2 below). */
8483 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8484 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8485 "0x%x, actual 0x%x\n", __FUNCTION__,
8486 (*lro)->tcp_next_seq,
8489 sp->mac_control.stats_info->
8490 sw_stat.outof_sequence_pkts++;
8495 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8496 ret = 1; /* Aggregate */
8498 ret = 2; /* Flush both */
8504 /* Before searching for available LRO objects,
8505 * check if the pkt is L3/L4 aggregatable. If not
8506 * don't create new LRO session. Just send this
/* NULL session: only the stateless checks apply to a would-be first packet. */
8509 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
/* Stage 3: claim a free slot for a brand-new session. */
8513 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8514 struct lro *l_lro = &ring_data->lro0_n[i];
8515 if (!(l_lro->in_use)) {
8517 ret = 3; /* Begin anew */
8523 if (ret == 0) { /* sessions exceeded */
8524 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
/* Stage 4: act on the chosen return code. */
8532 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8536 update_L3L4_header(sp, *lro);
8539 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
/* Session hit the per-session aggregation cap: finalize and flush. */
8540 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8541 update_L3L4_header(sp, *lro);
8542 ret = 4; /* Flush the LRO */
8546 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
/*
 * clear_lro_session - zero an LRO object so its slot (in_use == 0) can be
 * reused for a new flow. The size is cached in a function-local static,
 * though sizeof is a compile-time constant anyway.
 */
8554 static void clear_lro_session(struct lro *lro)
8556 static u16 lro_struct_size = sizeof(struct lro);
8558 memset(lro, 0, lro_struct_size);
/*
 * queue_rx_frame - hand a completed skb to the network stack, choosing
 * the VLAN-accelerated path when a vlan group is registered, the frame
 * carries a tag and stripping is enabled, and the NAPI vs. non-NAPI
 * receive entry point per the driver configuration.
 */
8561 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8563 struct net_device *dev = skb->dev;
8564 struct s2io_nic *sp = dev->priv;
/* Sets skb->protocol and advances past the Ethernet header. */
8566 skb->protocol = eth_type_trans(skb, dev);
8567 if (sp->vlgrp && vlan_tag
8568 && (vlan_strip_flag)) {
8569 /* Queueing the vlan frame to the upper layer */
8570 if (sp->config.napi)
8571 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8573 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
/* Non-VLAN (or stripping disabled) path. */
8575 if (sp->config.napi)
8576 netif_receive_skb(skb);
/*
 * lro_append_pkt - chain a freshly received segment onto the parent skb
 * of an LRO session via the frag_list, after trimming the new skb down
 * to just its TCP payload. Keeps len/data_len/truesize of the parent
 * consistent and bumps the clubbed-frames counter.
 */
8582 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8583 struct sk_buff *skb,
8586 struct sk_buff *first = lro->parent;
8588 first->len += tcp_len;
8589 first->data_len = lro->frags_len;
/* Drop L2/L3/L4 headers: keep only the payload bytes of this segment. */
8590 skb_pull(skb, (skb->len - tcp_len));
/* Append at the tail of the frag_list, or start the list if empty. */
8591 if (skb_shinfo(first)->frag_list)
8592 lro->last_frag->next = skb;
8594 skb_shinfo(first)->frag_list = skb;
/* Account the child's buffer memory against the aggregate skb. */
8595 first->truesize += skb->truesize;
8596 lro->last_frag = skb;
8597 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8602 * s2io_io_error_detected - called when PCI error is detected
8603 * @pdev: Pointer to PCI device
8604 * @state: The current pci connection state
8606 * This function is called after a PCI bus error affecting
8607 * this device has been detected.
8609 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8610 pci_channel_state_t state)
8612 struct net_device *netdev = pci_get_drvdata(pdev);
8613 struct s2io_nic *sp = netdev->priv;
/* Stop the stack from using the device while recovery is in progress. */
8615 netif_device_detach(netdev);
8617 if (netif_running(netdev)) {
8618 /* Bring down the card, while avoiding PCI I/O */
/* Second arg 0: skip register/PCI accesses that might fault again. */
8619 do_s2io_card_down(sp, 0);
8621 pci_disable_device(pdev);
/* Ask the PCI error-recovery core to perform a slot reset next. */
8623 return PCI_ERS_RESULT_NEED_RESET;
8627 * s2io_io_slot_reset - called after the pci bus has been reset.
8628 * @pdev: Pointer to PCI device
8630 * Restart the card from scratch, as if from a cold-boot.
8631 * At this point, the card has experienced a hard reset,
8632 * followed by fixups by BIOS, and has its config space
8633 * set up identically to what it was at cold boot.
8635 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8637 struct net_device *netdev = pci_get_drvdata(pdev);
8638 struct s2io_nic *sp = netdev->priv;
8640 if (pci_enable_device(pdev)) {
8641 printk(KERN_ERR "s2io: "
8642 "Cannot re-enable PCI device after reset.\n");
/* Device cannot be recovered; tell the core to give up on it. */
8643 return PCI_ERS_RESULT_DISCONNECT;
8646 pci_set_master(pdev);
8649 return PCI_ERS_RESULT_RECOVERED;
8653 * s2io_io_resume - called when traffic can start flowing again.
8654 * @pdev: Pointer to PCI device
8656 * This callback is called when the error recovery driver tells
8657 * us that its OK to resume normal operation.
8659 static void s2io_io_resume(struct pci_dev *pdev)
8661 struct net_device *netdev = pci_get_drvdata(pdev);
8662 struct s2io_nic *sp = netdev->priv;
8664 if (netif_running(netdev)) {
8665 if (s2io_card_up(sp)) {
8666 printk(KERN_ERR "s2io: "
8667 "Can't bring device back up after reset.\n");
8671 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8673 printk(KERN_ERR "s2io: "
8674 "Can't resetore mac addr after reset.\n");
8679 netif_device_attach(netdev);
8680 netif_wake_queue(netdev);