/*******************************************************************************

  Intel PRO/10GbE Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgb.h"
char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#ifndef CONFIG_IXGB_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "1.0.126-k4" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
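/*
 * Usage sketch (hypothetical values, not a recommendation): loading with
 * "modprobe ixgb copybreak=128" copies only frames of up to 128 bytes into
 * a freshly allocated skb, and because the parameter is registered with
 * mode 0644 it can also be changed at runtime through
 * /sys/module/ixgb/parameters/copybreak.
 */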
/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgb_pci_tbl[] = {
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
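/*
 * MODULE_DEVICE_TABLE exports the IDs above as module aliases so that
 * userspace (depmod/modprobe, driven by hotplug/udev) can autoload ixgb
 * when a matching device appears.  A sketch of the generated alias,
 * assuming the 82597EX device ID value 0x1048 from ixgb_hw.h:
 *
 *   $ modinfo ixgb | grep '^alias'
 *   alias:          pci:v00008086d00001048sv*sd*bc*sc*i*
 */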
/* Local Function Prototypes */

static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
#else
static bool ixgb_clean_rx_irq(struct ixgb_adapter *);
#endif
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_rx_register(struct net_device *netdev,
				  struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);
static struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe = ixgb_probe,
	.remove = __devexit_p(ixgb_remove),
	.err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
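/*
 * netif_msg_init() treats "debug" as a bit count rather than a bitmask.
 * A minimal worked example, assuming the standard NETIF_MSG_* bit layout:
 *
 *   debug = 3  =>  msg_enable = (1 << 3) - 1 = 0x7
 *                  (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 *
 * Out-of-range values fall back to the same default mask derived from
 * DEFAULT_DEBUG_LEVEL_SHIFT.
 */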
/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n",
	       ixgb_driver_string, ixgb_driver_version);
	printk(KERN_INFO "%s\n", ixgb_copyright);

	return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);
/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);
/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}
/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
		  IXGB_INT_TXDW | IXGB_INT_LSC;
	if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
		val |= IXGB_INT_GPI0;
	IXGB_WRITE_REG(&adapter->hw, IMS, val);
	IXGB_WRITE_FLUSH(&adapter->hw);
}
int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err, irq_flags = IRQF_SHARED;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */

	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* only enable MSI if bus is in PCI-X mode */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->have_msi = 1;
			irq_flags = 0;
		}
		/* proceed to try to request regular interrupt */
	}

	err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags,
			  netdev->name, netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		DPRINTK(PROBE, ERR,
			"Unable to allocate interrupt Error: %d\n", err);
		return err;
	}

	if ((hw->max_frame_size != max_frame) ||
	    (hw->max_frame_size !=
	     (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if (hw->max_frame_size >
		    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	clear_bit(__IXGB_DOWN, &adapter->flags);

#ifdef CONFIG_IXGB_NAPI
	napi_enable(&adapter->napi);
#endif
	ixgb_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies);

	return 0;
}
void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	/* prevent the interrupt handler from restarting watchdog */
	set_bit(__IXGB_DOWN, &adapter->flags);

#ifdef CONFIG_IXGB_NAPI
	napi_disable(&adapter->napi);
#endif
	/* waiting for NAPI to complete can re-enable interrupts */
	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);

	if (kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}
void
ixgb_reset(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;

	ixgb_adapter_stop(hw);
	if (!ixgb_init_hw(hw))
		DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");

	/* restore frame size information */
	IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
	if (hw->max_frame_size >
	    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
		u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
		if (!(ctrl0 & IXGB_CTRL0_JFE)) {
			ctrl0 |= IXGB_CTRL0_JFE;
			IXGB_WRITE_REG(hw, CTRL0, ctrl0);
		}
	}
}
/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	int pci_using_dac;
	int i;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
	    !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
			printk(KERN_ERR
			       "ixgb: No usable DMA configuration, aborting\n");
			goto err_dma_mask;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgb_driver_name);
	if (err)
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);

	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
				      pci_resource_len(pdev, BAR_0));
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}
	netdev->open = &ixgb_open;
	netdev->stop = &ixgb_close;
	netdev->hard_start_xmit = &ixgb_xmit_frame;
	netdev->get_stats = &ixgb_get_stats;
	netdev->set_multicast_list = &ixgb_set_multi;
	netdev->set_mac_address = &ixgb_set_mac;
	netdev->change_mtu = &ixgb_change_mtu;
	ixgb_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgb_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_IXGB_NAPI
	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
#endif
	netdev->vlan_rx_register = ixgb_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgb_netpoll;
#endif

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */

	err = ixgb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;
	netdev->features |= NETIF_F_TSO;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */

	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */
	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	flush_scheduled_work();

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
}
/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

	if ((hw->device_id == IXGB_DEVICE_ID_82597EX)
	    || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
	    || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
	    || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		DPRINTK(PROBE, ERR, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	set_bit(__IXGB_DOWN, &adapter->flags);
	return 0;
}
/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = ixgb_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */

	err = ixgb_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	err = ixgb_up(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	return err;
}
/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	ixgb_down(adapter, true);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}
/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if (!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate transmit descriptor ring memory\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate transmit descriptor memory\n");
		return -ENOMEM;
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	u64 tdba = adapter->tx_ring.dma;
	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	u32 tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE |
		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}
/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if (!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
	if (!rxdr->desc) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate receive descriptors\n");
		return -ENOMEM;
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}
/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	u32 rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
	    IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
	    IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
	    (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	rctl |= IXGB_RCTL_SECRC;

	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
		rctl |= IXGB_RCTL_BSIZE_2048;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
		rctl |= IXGB_RCTL_BSIZE_4096;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
		rctl |= IXGB_RCTL_BSIZE_8192;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
		rctl |= IXGB_RCTL_BSIZE_16384;

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	u64 rdba = adapter->rx_ring.dma;
	u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	u32 rctl;
	u32 rxcsum;

	/* make sure receives are disabled while setting up the descriptors */

	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */

	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */

	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* due to the hardware errata with RXDCTL, we are unable to use any of
	 * the performance enhancing features of it without causing other
	 * subtle bugs, some of the bugs could include receive length
	 * corruption at high data rates (WTHRESH > 0) and/or receive
	 * descriptor ring irregularities (particularly in hardware cache) */
	IXGB_WRITE_REG(hw, RXDCTL, 0);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if (adapter->rx_csum) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}
/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	pci_free_consistent(pdev, adapter->tx_ring.size,
			    adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}
static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	struct pci_dev *pdev = adapter->pdev;

	if (buffer_info->dma)
		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
			       PCI_DMA_TODEVICE);

	/* okay to call kfree_skb here instead of kfree_skb_any because
	 * this is never called in interrupt context */
	if (buffer_info->skb)
		dev_kfree_skb(buffer_info->skb);

	buffer_info->skb = NULL;
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	/* these fields must always be initialized in tx
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}
/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}
/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->skb) {
			pci_unmap_single(pdev,
					 buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}
/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}
/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u32 rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	} else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= IXGB_RCTL_MPE;
		rctl &= ~IXGB_RCTL_UPE;
	} else {
		rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}

	if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
		       IXGB_ETH_LENGTH_OF_ADDRESS];

		IXGB_WRITE_REG(hw, RCTL, rctl);

		for (i = 0, mc_ptr = netdev->mc_list;
		     mc_ptr;
		     i++, mc_ptr = mc_ptr->next)
			memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
			       mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);

		ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
	}
}
/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 **/

static void
ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if (adapter->hw.link_up) {
		if (!netif_carrier_ok(netdev)) {
			DPRINTK(LINK, INFO,
				"NIC Link is Up 10000 Mbps Full Duplex\n");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	ixgb_update_stats(adapter);

	if (!netif_carrier_ok(netdev)) {
		if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
#define IXGB_TX_FLAGS_CSUM	0x00000001
#define IXGB_TX_FLAGS_VLAN	0x00000002
#define IXGB_TX_FLAGS_TSO	0x00000004
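/*
 * These flags accumulate per frame in ixgb_xmit_frame(); for example, a
 * VLAN-tagged frame that is also being segmented ends up with
 * (IXGB_TX_FLAGS_VLAN | IXGB_TX_FLAGS_TSO), which ixgb_tx_queue() then
 * translates into the VLE/TSE command bits and checksum popts of the
 * transmit descriptors.
 */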
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	u16 ipcse, tucse, mss;
	int err;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;
		struct iphdr *iph;

		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(iph->check) - (void *)skb->data;
		ipcse = skb_transport_offset(skb) - 1;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}

	return 0;
}
static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
		css = skb_transport_offset(skb);
		cso = css + skb->csum_offset;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(u32 *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return true;
	}

	return false;
}
#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1 << IXGB_MAX_TXD_PWR)
static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;
	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;

		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->time_stamp = jiffies;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				       skb->data + offset,
				       size,
				       PCI_DMA_TODEVICE);
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if (++i == tx_ring->count) i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = 0;

		while (len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && (f == (nr_frags - 1))
				     && size == len && size > 8))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					     frag->page,
					     frag->page_offset + offset,
					     size,
					     PCI_DMA_TODEVICE);
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
			if (++i == tx_ring->count) i = 0;
		}
	}

	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
	      int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	u32 cmd_type_len = adapter->tx_cmd_type;
	u8 status = 0;
	u8 popts = 0;
	unsigned int i;

	if (tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if (tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if (tx_flags & IXGB_TX_FLAGS_VLAN)
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if (++i == tx_ring->count) i = 0;
	}

	tx_desc->cmd_type_len |=
		cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}
static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
			      struct ixgb_desc_ring *tx_ring, int size)
{
	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgb_maybe_stop_tx(netdev, size);
}
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			  (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
	+ 1 /* one more needed for sentinel TSO workaround */
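/*
 * Worked example: IXGB_MAX_DATA_PER_TXD is 1 << 14 = 16384 bytes, so
 * TXD_USE_COUNT(16384) = 1 and, on a machine with 4K pages,
 * TXD_USE_COUNT(PAGE_SIZE) = 1.  DESC_NEEDED then evaluates to
 * 1 + MAX_SKB_FRAGS + 1 + 1: the worst-case descriptor count for one skb
 * (linear head + every fragment + a context descriptor + the TSO
 * sentinel descriptor).
 */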
static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	int vlan_id = 0;
	int tso;

	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
					DESC_NEEDED)))
		return NETDEV_TX_BUSY;

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = vlan_tx_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if (ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
		      tx_flags);

	netdev->trans_start = jiffies;

	/* Make sure there is space in the ring for the next send. */
	ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
ixgb_tx_timeout(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

static void
ixgb_tx_timeout_task(struct work_struct *work)
{
	struct ixgb_adapter *adapter =
		container_of(work, struct ixgb_adapter, tx_timeout_task);

	adapter->tx_timeout_count++;
	ixgb_down(adapter, true);
	ixgb_up(adapter);
}
/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	return &adapter->net_stats;
}
/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	/* MTU < 68 is an error for IPv4 traffic, just don't allow it */
	if ((new_mtu < 68) ||
	    (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
		DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
		return -EINVAL;
	}

	if (old_max_frame == max_frame)
		return 0;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */

	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgb_up(adapter);

	return 0;
}
/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/

void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pci_channel_offline(pdev))
		return;

	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	    (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
		/* fix up multicast stats by removing broadcasts */
		if (multi >= bcast)
			multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

	/* Fill out the OS statistics structure */

	adapter->net_stats.rx_packets = adapter->stats.gprcl;
	adapter->net_stats.tx_packets = adapter->stats.gptcl;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprcl;
	adapter->net_stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	adapter->net_stats.rx_errors =
	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
	    adapter->stats.ruc +
	    adapter->stats.roc /*+ adapter->stats.rlec */ +
	    adapter->stats.icbc +
	    adapter->stats.ecbc + adapter->stats.mpc;

	/* see above
	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	 */

	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
	adapter->net_stats.rx_over_errors = adapter->stats.mpc;

	adapter->net_stats.tx_errors = 0;
	adapter->net_stats.rx_frame_errors = 0;
	adapter->net_stats.tx_aborted_errors = 0;
	adapter->net_stats.tx_carrier_errors = 0;
	adapter->net_stats.tx_fifo_errors = 0;
	adapter->net_stats.tx_heartbeat_errors = 0;
	adapter->net_stats.tx_window_errors = 0;
}
#define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/

static irqreturn_t
ixgb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	u32 icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
	unsigned int i;
#endif

	if (unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies);

#ifdef CONFIG_IXGB_NAPI
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		/* Disable interrupts and register for poll.  The flush
		 * of the posted write is intentionally left out. */
		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__netif_rx_schedule(netdev, &adapter->napi);
	}
#else
	/* yes, that is actually a & and it is meant to make sure that
	 * every pass through this for loop checks both receive and
	 * transmit queues for completed descriptors, intended to
	 * avoid starvation issues and assist tx/rx fairness. */
	for (i = 0; i < IXGB_MAX_INTR; i++)
		if (!ixgb_clean_rx_irq(adapter) &
		    !ixgb_clean_tx_irq(adapter))
			break;
#endif
	return IRQ_HANDLED;
}
#ifdef CONFIG_IXGB_NAPI
/**
 * ixgb_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/

static int
ixgb_clean(struct napi_struct *napi, int budget)
{
	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;

	ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, budget);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(netdev, napi);
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			ixgb_irq_enable(adapter);
	}

	return work_done;
}
#endif
/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);

	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		for (cleaned = false; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			if (tx_desc->popts &
			    (IXGB_TX_DESC_POPTS_TXSM |
			     IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			*(u32 *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if (++i == tx_ring->count) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean. */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
		    && !(IXGB_READ_REG(&adapter->hw, STATUS) &
			 IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  next_to_watch.status <%x>\n",
				IXGB_READ_REG(&adapter->hw, TDH),
				IXGB_READ_REG(&adapter->hw, TDT),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->status);
			netif_stop_queue(netdev);
		}
	}

	return cleaned;
}
/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
 **/

static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
		 struct ixgb_rx_desc *rx_desc,
		 struct sk_buff *skb)
{
	/* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
	    (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* At this point we know the hardware did the TCP checksum */
	/* now look at the TCP checksum error bit */
	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
		/* let the stack verify checksum errors */
		skb->ip_summed = CHECKSUM_NONE;
		adapter->hw_csum_rx_error++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_rx_good++;
	}
}
/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 **/

static bool
#ifdef CONFIG_IXGB_NAPI
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
#else
ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc, *next_rxd;
	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
	u32 length;
	unsigned int i, j;
	int cleaned_count = 0;
	bool cleaned = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
		struct sk_buff *skb;
		u8 status;

#ifdef CONFIG_IXGB_NAPI
		if (*work_done >= work_to_do)
			break;

		(*work_done)++;
#endif
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count) i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		if ((j = i + 1) == rx_ring->count) j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		pci_unmap_single(pdev,
				 buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		rx_desc->length = 0;

		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {

			/* All receives must fit into a single buffer */

			IXGB_DBG("Receive packet consumed multiple buffers "
				 "length<%x>\n", length);

			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		if (unlikely(rx_desc->errors &
		    (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
		     IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
		}
		/* end copybreak code */

		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
		if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->special));
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_IXGB_NAPI */
		if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->special));
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_IXGB_NAPI */
		netdev->last_rx = jiffies;

rxdesc_done:
		/* clean up descriptor, might be written over by hw */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
			ixgb_alloc_rx_buffers(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}

	rx_ring->next_to_clean = i;

	cleaned_count = IXGB_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		ixgb_alloc_rx_buffers(adapter, cleaned_count);

	return cleaned;
}
/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 **/

static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct ixgb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	long cleancount;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	cleancount = IXGB_DESC_UNUSED(rx_ring);

	/* leave three descriptors unused */
	while (--cleancount > 2 && cleaned_count--) {
		/* recycle! it's good for you */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
				       + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = pci_map_single(pdev,
						  skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee DD bit not set now before h/w gets descriptor
		 * this is the rest of the workaround for h/w double
		 * writeback. */
		rx_desc->status = 0;

		if (++i == rx_ring->count) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs, such
		 * as IA-64). */
		wmb();
		IXGB_WRITE_REG(&adapter->hw, RDT, i);
	}
}
/**
 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
 *
 * @netdev: network interface device structure
 * @grp: indicates to enable or disable tagging/stripping
 **/

static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 ctrl, rctl;

	ixgb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl |= IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* enable VLAN receive filtering */

		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */

		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl &= ~IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* disable VLAN filtering */

		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl &= ~IXGB_RCTL_VFE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	}

	/* don't enable interrupts unless we are UP */
	if (adapter->netdev->flags & IFF_UP)
		ixgb_irq_enable(adapter);
}
static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	/* add VID to filter table */

	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}

static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	ixgb_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	/* don't enable interrupts unless we are UP */
	if (adapter->netdev->flags & IFF_UP)
		ixgb_irq_enable(adapter);

	/* remove VID from filter table */

	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void ixgb_netpoll(struct net_device *dev)
{
	struct ixgb_adapter *adapter = netdev_priv(dev);

	disable_irq(adapter->pdev->irq);
	ixgb_intr(adapter->pdev->irq, dev);
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * ixgb_io_error_detected - called when PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the ixgb_probe() routine.
 */
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform card reset only on one instance of the card */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;

	pci_set_master(pdev);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);

	/* Make sure the EEPROM is good */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgb_io_resume - called when it's OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it's OK to resume
 * normal operation.  Implementation resembles the second half
 * of the ixgb_probe() routine.
 */
static void ixgb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		if (ixgb_up(adapter)) {
			printk(KERN_ERR
			       "ixgb: can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
	mod_timer(&adapter->watchdog_timer, jiffies);
}