/*******************************************************************************

  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in
  the file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "7.1.9-k6"DRIVERNAPI
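/* Note: adjacent string literals concatenate at compile time, so with
 * CONFIG_E1000_NAPI enabled DRV_VERSION expands to "7.1.9-k6-NAPI",
 * and to plain "7.1.9-k6" otherwise. */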
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1049),
	INTEL_E1000_ETHERNET_DEVICE(0x104A),
	INTEL_E1000_ETHERNET_DEVICE(0x104B),
	INTEL_E1000_ETHERNET_DEVICE(0x104C),
	INTEL_E1000_ETHERNET_DEVICE(0x104D),
	INTEL_E1000_ETHERNET_DEVICE(0x105E),
	INTEL_E1000_ETHERNET_DEVICE(0x105F),
	INTEL_E1000_ETHERNET_DEVICE(0x1060),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x107D),
	INTEL_E1000_ETHERNET_DEVICE(0x107E),
	INTEL_E1000_ETHERNET_DEVICE(0x107F),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x108B),
	INTEL_E1000_ETHERNET_DEVICE(0x108C),
	INTEL_E1000_ETHERNET_DEVICE(0x1096),
	INTEL_E1000_ETHERNET_DEVICE(0x1098),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x109A),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x10B9),
	INTEL_E1000_ETHERNET_DEVICE(0x10BA),
	INTEL_E1000_ETHERNET_DEVICE(0x10BB),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);

/* Local Function Prototypes */

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_multi(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct net_device *poll_dev, int *budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring,
				    int *work_done, int work_to_do);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				       struct e1000_rx_ring *rx_ring,
				       int *work_done, int work_to_do);
#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				       struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      struct e1000_rx_ring *rx_ring,
				      int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
	/* Power Management Hooks */
	.suspend = e1000_suspend,
#ifdef CONFIG_PM
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
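/* Illustrative usage (example values, not from this file): loading the
 * module with "modprobe e1000 debug=3" makes the probe path below set
 * msg_enable to (1 << 3) - 1 = 0x7, i.e. NETIF_MSG_DRV | NETIF_MSG_PROBE |
 * NETIF_MSG_LINK, while debug=16 enables every netif message class. */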

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
e1000_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       e1000_driver_string, e1000_driver_version);

	printk(KERN_INFO "%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);

	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int flags, err = 0;

	flags = IRQF_SHARED;
#ifdef CONFIG_PCI_MSI
	if (adapter->hw.mac_type > e1000_82547_rev_2) {
		adapter->have_msi = TRUE;
		if ((err = pci_enable_msi(adapter->pdev))) {
			DPRINTK(PROBE, ERR,
			 "Unable to allocate MSI interrupt Error: %d\n", err);
			adapter->have_msi = FALSE;
		}
	}
	if (adapter->have_msi)
		flags &= ~IRQF_SHARED;
#endif
	if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
	                       netdev->name, netdev)))
		DPRINTK(PROBE, ERR,
			"Unable to allocate interrupt Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);

#ifdef CONFIG_PCI_MSI
	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);
#endif
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	E1000_WRITE_REG(&adapter->hw, IMC, ~0);
	E1000_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
		E1000_WRITE_FLUSH(&adapter->hw);
	}
}

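/* Worked example of the irq_sem pairing (illustrative): with interrupts
 * enabled, irq_sem is 0. Two nested e1000_irq_disable() calls raise it
 * to 2; the first e1000_irq_enable() only drops it to 1, and just the
 * second one, which reaches 0, passes atomic_dec_and_test() and
 * re-arms IMS. */
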
static void
e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
	uint16_t old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!adapter->vlgrp->vlan_devices[vid]) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
				e1000_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

			if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !adapter->vlgrp->vlan_devices[old_vid])
				e1000_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/

static void
e1000_release_hw_control(struct e1000_adapter *adapter)
{
	uint32_t ctrl_ext;
	uint32_t swsm;
	uint32_t extcnf;

	/* Let firmware take over control of h/w */
	switch (adapter->hw.mac_type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	case e1000_ich8lan:
		extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/

static void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
	uint32_t ctrl_ext;
	uint32_t swsm;
	uint32_t extcnf;

	/* Let firmware know the driver has taken over */
	switch (adapter->hw.mac_type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_ich8lan:
		extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
		E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
				extcnf | E1000_EXTCNF_CTRL_SWFLAG);
		break;
	default:
		break;
	}
}

int
e1000_up(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	/* hardware has been reset, we need to reload some things */

	e1000_set_multi(netdev);

	e1000_restore_vlan(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
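	/* Illustrative ring arithmetic (values assumed): for a 256-entry
	 * ring with next_to_clean == next_to_use (an empty ring),
	 * E1000_DESC_UNUSED yields 256 - 1 = 255, so one descriptor
	 * always stays vacant and a completely full ring can never look
	 * identical to an empty one. */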

	adapter->tx_queue_len = netdev->tx_queue_len;

	mod_timer(&adapter->watchdog_timer, jiffies);

#ifdef CONFIG_E1000_NAPI
	netif_poll_enable(netdev);
#endif
	e1000_irq_enable(adapter);

	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	uint16_t mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (adapter->hw.media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
				     e1000_check_mng_mode(&adapter->hw);
	/* Power down the PHY so no link is implied when interface is down
	 * The PHY cannot be powered down if any of the following is TRUE
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.mac_type != e1000_ich8lan &&
	    adapter->hw.media_type == e1000_media_type_copper &&
	    !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
	    !mng_mode_enabled &&
	    !e1000_check_phy_reset_block(&adapter->hw)) {
		uint16_t mii_reg = 0;
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
		mdelay(1);
	}
}

void
e1000_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

#ifdef CONFIG_E1000_NAPI
	netif_poll_disable(netdev);
#endif
	netdev->tx_queue_len = adapter->tx_queue_len;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void
e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void
e1000_reset(struct e1000_adapter *adapter)
{
	uint32_t pba, manc;
	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (adapter->hw.mac_type) {
	case e1000_82547:
	case e1000_82547_rev_2:
		pba = E1000_PBA_30K;
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_38K;
		break;
	case e1000_82573:
		pba = E1000_PBA_12K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	default:
		pba = E1000_PBA_48K;
		break;
	}

	if ((adapter->hw.mac_type != e1000_82573) &&
	    (adapter->netdev->mtu > E1000_RXBUFFER_8192))
		pba -= 8; /* allocate more FIFO for Tx */

	if (adapter->hw.mac_type == e1000_82547) {
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
		atomic_set(&adapter->tx_fifo_stall, 0);
	}

	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* flow control settings */
	/* Set the FC high water mark to 90% of the FIFO size.
	 * Required to clear last 3 LSB */
	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
	/* We can't use 90% on small FIFOs because the remainder
	 * would be less than 1 full frame. In this case, we size
	 * it to allow at least a full frame above the high water
	 * mark. */
	if (pba < E1000_PBA_16K)
		fc_high_water_mark = (pba * 1024) - 1600;
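	/* Worked example (illustrative): pba counts kilobytes, so for the
	 * default 48 KB FIFO 90% is (48 * 9216) / 10 = 44236, and the
	 * & 0xFFF8 mask rounds this down to 44232, a multiple of 8 as the
	 * hardware requires. For a small 8 KB FIFO, 90% would leave only
	 * about 820 bytes of headroom, less than one 1522-byte frame, so
	 * the mark becomes 8 * 1024 - 1600 = 6592 instead. */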

	adapter->hw.fc_high_water = fc_high_water_mark;
	adapter->hw.fc_low_water = fc_high_water_mark - 8;
	if (adapter->hw.mac_type == e1000_80003es2lan)
		adapter->hw.fc_pause_time = 0xFFFF;
	else
		adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
	adapter->hw.fc_send_xon = 1;
	adapter->hw.fc = adapter->hw.original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac_type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
	if (e1000_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");
	e1000_update_mng_vlan(adapter);
	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(&adapter->hw);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);

	if (!adapter->smart_power_down &&
	    (adapter->hw.mac_type == e1000_82571 ||
	     adapter->hw.mac_type == e1000_82572)) {
		uint16_t phy_data = 0;
		/* speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed */
		e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
				   &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
				    phy_data);
	}

	if (adapter->hw.mac_type < e1000_ich8lan)
	/* FIXME: this code is duplicate and wrong for PCI Express */
	if (adapter->en_mng_pt) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
e1000_probe(struct pci_dev *pdev,
            const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	unsigned long mmio_start, mmio_len;
	unsigned long flash_start, flash_len;

	static int cards_found = 0;
	static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	uint16_t eeprom_data;
	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;

	if ((err = pci_enable_device(pdev)))
		return err;

	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
	    !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
			E1000_ERR("No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	if ((err = pci_request_regions(pdev, e1000_driver_name)))
		goto err_pci_reg;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = (1 << debug) - 1;

	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->open = &e1000_open;
	netdev->stop = &e1000_close;
	netdev->hard_start_xmit = &e1000_xmit_frame;
	netdev->get_stats = &e1000_get_stats;
	netdev->set_multicast_list = &e1000_set_multi;
	netdev->set_mac_address = &e1000_set_mac;
	netdev->change_mtu = &e1000_change_mtu;
	netdev->do_ioctl = &e1000_ioctl;
	e1000_set_ethtool_ops(netdev);
	netdev->tx_timeout = &e1000_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
	netdev->poll = &e1000_clean;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = e1000_vlan_rx_register;
	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e1000_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;

	/* setup the private structure */

	if ((err = e1000_sw_init(adapter)))
		goto err_sw_init;

	/* Flash BAR mapping must happen after e1000_sw_init
	 * because it depends on mac_type */
	if ((adapter->hw.mac_type == e1000_ich8lan) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	if (e1000_check_phy_reset_block(&adapter->hw))
		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");

	/* if ksp3, indicate if it's port a being setup */
	if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
	    e1000_ksp3_port_a == 0)
		adapter->ksp3_port_a = 1;
	e1000_ksp3_port_a++;
	/* Reset for multiple KP3 adapters */
	if (e1000_ksp3_port_a == 4)
		e1000_ksp3_port_a = 0;

	if (adapter->hw.mac_type >= e1000_82543) {
		netdev->features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_RX |
				   NETIF_F_HW_VLAN_FILTER;
		if (adapter->hw.mac_type == e1000_ich8lan)
			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
	}

#ifdef NETIF_F_TSO
	if ((adapter->hw.mac_type >= e1000_82544) &&
	    (adapter->hw.mac_type != e1000_82547))
		netdev->features |= NETIF_F_TSO;

#ifdef NETIF_F_TSO_IPV6
	if (adapter->hw.mac_type > e1000_82547_rev_2)
		netdev->features |= NETIF_F_TSO_IPV6;
#endif
#endif
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->features |= NETIF_F_LLTX;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);

	/* initialize eeprom parameters */

	if (e1000_init_eeprom_params(&adapter->hw)) {
		E1000_ERR("EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(&adapter->hw);

	/* make sure the EEPROM is good */

	if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		goto err_eeprom;
	}

	/* copy the MAC address out of the EEPROM */

	if (e1000_read_mac_addr(&adapter->hw))
		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
		goto err_eeprom;
	}

	e1000_read_part_num(&adapter->hw, &(adapter->part_num));

	e1000_get_bus_info(&adapter->hw);

	init_timer(&adapter->tx_fifo_stall_timer);
	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
	adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task,
		  (void (*)(void *))e1000_reset_task, netdev);

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_ich8lan:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82571:
	case e1000_80003es2lan:
		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
			e1000_read_eeprom(&adapter->hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->wol |= E1000_WUFC_MAG;

	/* print bus type/speed/width info */
	{
	struct e1000_hw *hw = &adapter->hw;
	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
		 "32-bit"));
	}

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up. For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev)))
		goto err_register;

	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
	e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
err_flashmap:
#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++)
		dev_put(&adapter->polling_netdev[i]);
#endif

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif
err_sw_init:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t manc;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	flush_scheduled_work();

	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.mac_type != e1000_ich8lan &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if (manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
		}
	}

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	unregister_netdev(netdev);
#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++)
		dev_put(&adapter->polling_netdev[i]);
#endif

	if (!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */

	if (e1000_set_mac_type(hw)) {
		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);

	hw->wait_autoneg_complete = FALSE;
	hw->tbi_compatibility_en = TRUE;
	hw->adaptive_ifs = TRUE;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = FALSE;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->polling_netdev[i].priv = adapter;
		adapter->polling_netdev[i].poll = &e1000_clean;
		adapter->polling_netdev[i].weight = 64;
		dev_hold(&adapter->polling_netdev[i]);
		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
	}
	spin_lock_init(&adapter->tx_queue_lock);
#endif

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->stats_lock);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/

static int __devinit
e1000_alloc_queues(struct e1000_adapter *adapter)
{
	int size;

	size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;
	memset(adapter->tx_ring, 0, size);

	size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}
	memset(adapter->rx_ring, 0, size);

#ifdef CONFIG_E1000_NAPI
	size = sizeof(struct net_device) * adapter->num_rx_queues;
	adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
	if (!adapter->polling_netdev) {
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		return -ENOMEM;
	}
	memset(adapter->polling_netdev, 0, size);
#endif

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags))
		return -EBUSY;

	/* allocate transmit descriptors */

	if ((err = e1000_setup_all_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */

	if ((err = e1000_setup_all_rx_resources(adapter)))
		goto err_setup_rx;

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	e1000_power_up_phy(adapter);

	if ((err = e1000_up(adapter)))
		goto err_up;
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now open */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return E1000_SUCCESS;

err_up:
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);
err_req_irq:
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now closed */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_release_hw_control(adapter);

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
		      void *start, unsigned long len)
{
	unsigned long begin = (unsigned long) start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (adapter->hw.mac_type == e1000_82545 ||
	    adapter->hw.mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
	}

	return TRUE;
}

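/* Worked example (illustrative addresses): a 64-byte buffer at 0x1FFE0
 * ends at 0x2001F, so begin ^ (end - 1) = 0x1FFE0 ^ 0x2001F = 0x3FFFF,
 * and (0x3FFFF >> 16) != 0 flags it as straddling a 64 kB boundary
 * (FALSE). The same buffer at 0x20000 gives 0x20000 ^ 0x2003F = 0x3F,
 * which shifts to 0, so it passes (TRUE). */
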
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int
e1000_setup_tx_resources(struct e1000_adapter *adapter,
			 struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if (!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);
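	/* Worked example (illustrative counts): a legacy Tx descriptor is
	 * 16 bytes, so a 256-entry ring needs exactly 4096 bytes and is
	 * unchanged by the round-up, while an 80-entry ring needs 1280
	 * bytes and is padded to a full 4096-byte allocation. */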

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
			"at %p\n", txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			pci_free_consistent(pdev, txdr->size, txdr->desc,
					    txdr->dma);
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	spin_lock_init(&txdr->tx_lock);

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/

int
e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
	uint64_t tdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t tdlen, tctl, tipg, tarc;
	uint32_t ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		E1000_WRITE_REG(hw, TDLEN, tdlen);
		E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, TDT, 0);
		E1000_WRITE_REG(hw, TDH, 0);
		adapter->tx_ring[0].tdh = E1000_TDH;
		adapter->tx_ring[0].tdt = E1000_TDT;
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */

	if (hw->media_type == e1000_media_type_fiber ||
	    hw->media_type == e1000_media_type_internal_serdes)
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	case e1000_80003es2lan:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	E1000_WRITE_REG(hw, TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = E1000_READ_REG(hw, TCTL);

	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

#ifdef DISABLE_MULR
	/* disable Multiple Reads for debugging */
	tctl &= ~E1000_TCTL_MULR;
#endif

	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
		tarc = E1000_READ_REG(hw, TARC0);
		tarc |= ((1 << 25) | (1 << 21));
		E1000_WRITE_REG(hw, TARC0, tarc);
		tarc = E1000_READ_REG(hw, TARC1);
		tarc |= (1 << 25);
		if (tctl & E1000_TCTL_MULR)
			tarc &= ~(1 << 28);
		else
			tarc |= (1 << 28);
		E1000_WRITE_REG(hw, TARC1, tarc);
	} else if (hw->mac_type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(hw, TARC0);
		tarc |= 1;
		E1000_WRITE_REG(hw, TARC0, tarc);
		tarc = E1000_READ_REG(hw, TARC1);
		tarc |= 1;
		E1000_WRITE_REG(hw, TARC1, tarc);
	}

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
		E1000_TXD_CMD_IFCS;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;

	E1000_WRITE_REG(hw, TCTL, tctl);
}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_setup_rx_resources(struct e1000_adapter *adapter,
			 struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if (!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	size = sizeof(struct e1000_ps_page) * rxdr->count;
	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page, 0, size);

	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page_dma) {
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page_dma, 0, size);

	if (adapter->hw.mac_type <= e1000_82547_rev_2)
		desc_len = sizeof(struct e1000_rx_desc);
	else
		desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	E1000_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		kfree(rxdr->ps_page_dma);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
			"at %p\n", rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
					    rxdr->dma);
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/

int
e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Rx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
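/* Worked example (illustrative, 4 kB pages assumed): for S = 9000,
 * 9000 >> PAGE_SHIFT = 2 whole pages plus a nonzero remainder
 * (9000 & 4095 = 808), so PAGE_USE_COUNT yields 3; an exact multiple
 * such as S = 8192 has no remainder and yields exactly 2. */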
static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
	uint32_t rctl, rfctl;
	uint32_t psrctl = 0;
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
	uint32_t pages = 0;
#endif

	rctl = E1000_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (adapter->hw.tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_256:
		rctl |= E1000_RCTL_SZ_256;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_512:
		rctl |= E1000_RCTL_SZ_512;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_1024:
		rctl |= E1000_RCTL_SZ_1024;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
	/* 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers. Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
	    PAGE_SIZE <= 16384)
		adapter->rx_ps_pages = pages;
	else
		adapter->rx_ps_pages = 0;
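	/* Illustrative outcomes (values assumed, 4 kB PAGE_SIZE): a
	 * 1500-byte mtu needs 1 page and a 9000-byte jumbo mtu needs 3,
	 * so both enable packet split on 82571 and newer parts; an mtu
	 * above 12288 would need 4 or more pages, so rx_ps_pages stays 0
	 * and the legacy receive path is used. */
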
	if (adapter->rx_ps_pages) {
		/* Configure extra packet-split registers */
		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/* disable IPv6 packet split support */
		rfctl |= E1000_RFCTL_IPV6_DIS;
		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);

		rctl |= E1000_RCTL_DTYP_PS;

		psrctl |= adapter->rx_ps_bsize0 >>
			E1000_PSRCTL_BSIZE0_SHIFT;

		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE3_SHIFT;
		case 2:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE2_SHIFT;
		case 1:
			psrctl |= PAGE_SIZE >>
				E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
	}
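	/* Note: the missing break statements in the switch above are
	 * deliberate fall-throughs, so rx_ps_pages = 3 programs BSIZE3,
	 * BSIZE2 and BSIZE1 in turn. Illustrative result (4 kB pages,
	 * bsize0 = 128): one 128-byte header buffer plus three page-sized
	 * data buffers per packet-split descriptor. */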
#endif

	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
	uint64_t rdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t rdlen, rctl, rxcsum, ctrl_ext;

	if (adapter->rx_ps_pages) {
		/* this is a 32 byte descriptor */
		rdlen = adapter->rx_ring[0].count *
			sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = E1000_READ_REG(hw, RCTL);
	E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
		if (adapter->itr > 1)
			E1000_WRITE_REG(hw, ITR,
				1000000000 / (adapter->itr * 256));
	}
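	/* Worked example (illustrative): ITR counts in 256 ns units, so
	 * a target of itr = 8000 interrupts/sec programs
	 * 1000000000 / (8000 * 256) = 488, roughly a 125 us minimum gap
	 * between receive interrupt assertions. */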

	if (hw->mac_type >= e1000_82571) {
		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
		/* Reset delay timers after every interrupt */
		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
#ifdef CONFIG_E1000_NAPI
		/* Auto-Mask interrupts upon ICR read. */
		ctrl_ext |= E1000_CTRL_EXT_IAME;
#endif
		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
		E1000_WRITE_REG(hw, IAM, ~0);
		E1000_WRITE_FLUSH(hw);
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		E1000_WRITE_REG(hw, RDLEN, rdlen);
		E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, RDT, 0);
		E1000_WRITE_REG(hw, RDH, 0);
		adapter->rx_ring[0].rdh = E1000_RDH;
		adapter->rx_ring[0].rdt = E1000_RDT;
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = E1000_READ_REG(hw, RXCSUM);
		if (adapter->rx_csum == TRUE) {
			rxcsum |= E1000_RXCSUM_TUOFL;

			/* Enable 82571 IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			if ((hw->mac_type >= e1000_82571) &&
			    (adapter->rx_ps_pages)) {
				rxcsum |= E1000_RXCSUM_IPPCSE;
			}
		} else {
			rxcsum &= ~E1000_RXCSUM_TUOFL;
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(hw, RCTL, rctl);
}

/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/

static void
e1000_free_tx_resources(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
				 struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
			       buffer_info->dma,
			       buffer_info->length,
			       PCI_DMA_TODEVICE);
	}
	if (buffer_info->skb)
		dev_kfree_skb_any(buffer_info->skb);
	memset(buffer_info, 0, sizeof(struct e1000_buffer));
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/

static void
e1000_clean_tx_ring(struct e1000_adapter *adapter,
		    struct e1000_tx_ring *tx_ring)
{
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
}

/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/

static void
e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/

static void
e1000_free_rx_resources(struct e1000_adapter *adapter,
			struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	kfree(rx_ring->ps_page);
	rx_ring->ps_page = NULL;
	kfree(rx_ring->ps_page_dma);
	rx_ring->ps_page_dma = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/

static void
e1000_clean_rx_ring(struct e1000_adapter *adapter,
		    struct e1000_rx_ring *rx_ring)
{
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->skb) {
			pci_unmap_single(pdev,
					 buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_FROMDEVICE);

			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
		for (j = 0; j < adapter->rx_ps_pages; j++) {
			if (!ps_page->ps_page[j]) break;
			pci_unmap_page(pdev,
				       ps_page_dma->ps_page_dma[j],
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ps_page_dma->ps_page_dma[j] = 0;
			put_page(ps_page->ps_page[j]);
			ps_page->ps_page[j] = NULL;
		}
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);
	size = sizeof(struct e1000_ps_page) * rx_ring->count;
	memset(rx_ring->ps_page, 0, size);
	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
	memset(rx_ring->ps_page_dma, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
}

/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/

static void
e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

2106 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2107 * and memory write and invalidate disabled for certain operations
2110 e1000_enter_82542_rst(struct e1000_adapter *adapter)
2112 struct net_device *netdev = adapter->netdev;
2115 e1000_pci_clear_mwi(&adapter->hw);
2117 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2118 rctl |= E1000_RCTL_RST;
2119 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2120 E1000_WRITE_FLUSH(&adapter->hw);
2123 if (netif_running(netdev))
2124 e1000_clean_all_rx_rings(adapter);
2128 e1000_leave_82542_rst(struct e1000_adapter *adapter)
2130 struct net_device *netdev = adapter->netdev;
2133 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2134 rctl &= ~E1000_RCTL_RST;
2135 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2136 E1000_WRITE_FLUSH(&adapter->hw);
2139 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2140 e1000_pci_set_mwi(&adapter->hw);
2142 if (netif_running(netdev)) {
2143 /* No need to loop, because 82542 supports only 1 queue */
2144 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2145 e1000_configure_rx(adapter);
2146 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2151 * e1000_set_mac - Change the Ethernet Address of the NIC
2152 * @netdev: network interface device structure
2153 * @p: pointer to an address structure
2155 * Returns 0 on success, negative on failure
2159 e1000_set_mac(struct net_device *netdev, void *p)
2161 struct e1000_adapter *adapter = netdev_priv(netdev);
2162 struct sockaddr *addr = p;
2164 if (!is_valid_ether_addr(addr->sa_data))
2165 return -EADDRNOTAVAIL;
2167 /* 82542 2.0 needs to be in reset to write receive address registers */
2169 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2170 e1000_enter_82542_rst(adapter);
2172 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2173 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
2175 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2177 /* With 82571 controllers, LAA may be overwritten (with the default)
2178 * due to controller reset from the other port. */
2179 if (adapter->hw.mac_type == e1000_82571) {
2180 /* activate the work around */
2181 adapter->hw.laa_is_present = 1;
		/* Hold a copy of the LAA in RAR[14].  This is done so that
		 * between the time RAR[0] gets clobbered and the time it
		 * gets fixed (in e1000_watchdog), the actual LAA is in one
		 * of the RARs and no incoming packets directed to this port
		 * are dropped.  Eventually the LAA will be in RAR[0] and
		 * RAR[14]. */
2189 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2190 E1000_RAR_ENTRIES - 1);
2193 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2194 e1000_leave_82542_rst(adapter);
2200 * e1000_set_multi - Multicast and Promiscuous mode set
2201 * @netdev: network interface device structure
2203 * The set_multi entry point is called whenever the multicast address
2204 * list or the network interface flags are updated. This routine is
2205 * responsible for configuring the hardware for proper multicast,
2206 * promiscuous mode, and all-multi behavior.
2210 e1000_set_multi(struct net_device *netdev)
2212 struct e1000_adapter *adapter = netdev_priv(netdev);
2213 struct e1000_hw *hw = &adapter->hw;
2214 struct dev_mc_list *mc_ptr;
	uint32_t rctl;
	uint32_t hash_value;
2217 int i, rar_entries = E1000_RAR_ENTRIES;
2218 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2219 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2220 E1000_NUM_MTA_REGISTERS;
2222 if (adapter->hw.mac_type == e1000_ich8lan)
2223 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
2225 /* reserve RAR[14] for LAA over-write work-around */
2226 if (adapter->hw.mac_type == e1000_82571)
2229 /* Check for Promiscuous and All Multicast modes */
2231 rctl = E1000_READ_REG(hw, RCTL);
2233 if (netdev->flags & IFF_PROMISC) {
2234 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2235 } else if (netdev->flags & IFF_ALLMULTI) {
2236 rctl |= E1000_RCTL_MPE;
2237 rctl &= ~E1000_RCTL_UPE;
2239 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2242 E1000_WRITE_REG(hw, RCTL, rctl);
2244 /* 82542 2.0 needs to be in reset to write receive address registers */
2246 if (hw->mac_type == e1000_82542_rev2_0)
2247 e1000_enter_82542_rst(adapter);
	/* load the first 14 multicast addresses into the exact filters 1-14
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 * -- with 82571 controllers only 0-13 entries are filled here
	 */
2254 mc_ptr = netdev->mc_list;
2256 for (i = 1; i < rar_entries; i++) {
2258 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2259 mc_ptr = mc_ptr->next;
2261 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2262 E1000_WRITE_FLUSH(hw);
2263 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2264 E1000_WRITE_FLUSH(hw);
2268 /* clear the old settings from the multicast hash table */
2270 for (i = 0; i < mta_reg_count; i++) {
2271 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2272 E1000_WRITE_FLUSH(hw);
2275 /* load any remaining addresses into the hash table */
2277 for (; mc_ptr; mc_ptr = mc_ptr->next) {
2278 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2279 e1000_mta_set(hw, hash_value);
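	/* For reference, a sketch of what e1000_mta_set() does with the
	 * 12-bit hash (the authoritative version lives in e1000_hw.c):
	 * the hash selects one bit in the 4096-bit multicast table
	 * array, roughly
	 *
	 *	reg = (hash_value >> 5) & (mta_reg_count - 1);
	 *	bit = hash_value & 0x1F;
	 *	mta = E1000_READ_REG_ARRAY(hw, MTA, reg);
	 *	E1000_WRITE_REG_ARRAY(hw, MTA, reg, mta | (1 << bit));
	 */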
2282 if (hw->mac_type == e1000_82542_rev2_0)
2283 e1000_leave_82542_rst(adapter);
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
2290 e1000_update_phy_info(unsigned long data)
2292 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2293 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2297 * e1000_82547_tx_fifo_stall - Timer Call-back
2298 * @data: pointer to adapter cast into an unsigned long
2302 e1000_82547_tx_fifo_stall(unsigned long data)
2304 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	struct net_device *netdev = adapter->netdev;
	uint32_t tctl;
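	/* The internal Tx FIFO is known to have fully drained only when
	 * the descriptor ring head/tail (TDH/TDT) and both FIFO head/tail
	 * register pairs (TDFH/TDFT, TDFHS/TDFTS) each read back equal;
	 * only then is it safe to rewind the FIFO pointers and re-enable
	 * transmits, which is what the block below does. */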
2308 if (atomic_read(&adapter->tx_fifo_stall)) {
2309 if ((E1000_READ_REG(&adapter->hw, TDT) ==
2310 E1000_READ_REG(&adapter->hw, TDH)) &&
2311 (E1000_READ_REG(&adapter->hw, TDFT) ==
2312 E1000_READ_REG(&adapter->hw, TDFH)) &&
2313 (E1000_READ_REG(&adapter->hw, TDFTS) ==
2314 E1000_READ_REG(&adapter->hw, TDFHS))) {
2315 tctl = E1000_READ_REG(&adapter->hw, TCTL);
2316 E1000_WRITE_REG(&adapter->hw, TCTL,
2317 tctl & ~E1000_TCTL_EN);
2318 E1000_WRITE_REG(&adapter->hw, TDFT,
2319 adapter->tx_head_addr);
2320 E1000_WRITE_REG(&adapter->hw, TDFH,
2321 adapter->tx_head_addr);
2322 E1000_WRITE_REG(&adapter->hw, TDFTS,
2323 adapter->tx_head_addr);
2324 E1000_WRITE_REG(&adapter->hw, TDFHS,
2325 adapter->tx_head_addr);
2326 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
2327 E1000_WRITE_FLUSH(&adapter->hw);
2329 adapter->tx_fifo_head = 0;
2330 atomic_set(&adapter->tx_fifo_stall, 0);
2331 netif_wake_queue(netdev);
2333 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2339 * e1000_watchdog - Timer Call-back
2340 * @data: pointer to adapter cast into an unsigned long
2343 e1000_watchdog(unsigned long data)
2345 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2346 struct net_device *netdev = adapter->netdev;
2347 struct e1000_tx_ring *txdr = adapter->tx_ring;
2348 uint32_t link, tctl;
2351 ret_val = e1000_check_for_link(&adapter->hw);
2352 if ((ret_val == E1000_ERR_PHY) &&
2353 (adapter->hw.phy_type == e1000_phy_igp_3) &&
2354 (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2355 /* See e1000_kumeran_lock_loss_workaround() */
2357 "Gigabit has been disabled, downgrading speed\n");
2359 if (adapter->hw.mac_type == e1000_82573) {
2360 e1000_enable_tx_pkt_filtering(&adapter->hw);
2361 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2362 e1000_update_mng_vlan(adapter);
2365 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2366 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2367 link = !adapter->hw.serdes_link_down;
2369 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2372 if (!netif_carrier_ok(netdev)) {
2373 boolean_t txb2b = 1;
2374 e1000_get_speed_and_duplex(&adapter->hw,
2375 &adapter->link_speed,
2376 &adapter->link_duplex);
2378 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
2379 adapter->link_speed,
2380 adapter->link_duplex == FULL_DUPLEX ?
2381 "Full Duplex" : "Half Duplex");
2383 /* tweak tx_queue_len according to speed/duplex
2384 * and adjust the timeout factor */
2385 netdev->tx_queue_len = adapter->tx_queue_len;
2386 adapter->tx_timeout_factor = 1;
		switch (adapter->link_speed) {
		case SPEED_10:
			txb2b = 0;
			netdev->tx_queue_len = 10;
			adapter->tx_timeout_factor = 8;
			break;
		case SPEED_100:
			txb2b = 0;
			netdev->tx_queue_len = 100;
			/* maybe add some timeout factor ? */
			break;
		}
2400 if ((adapter->hw.mac_type == e1000_82571 ||
		     adapter->hw.mac_type == e1000_82572) &&
		    txb2b == 0) {
#define SPEED_MODE_BIT (1 << 21)
			uint32_t tarc0;
			tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
			tarc0 &= ~SPEED_MODE_BIT;
			E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
		}
2411 /* disable TSO for pcie and 10/100 speeds, to avoid
2412 * some hardware issues */
2413 if (!adapter->tso_force &&
2414 adapter->hw.bus_type == e1000_bus_type_pci_express){
			switch (adapter->link_speed) {
			case SPEED_10:
			case SPEED_100:
				DPRINTK(PROBE, INFO,
					"10/100 speed: disabling TSO\n");
				netdev->features &= ~NETIF_F_TSO;
				break;
			default:
				netdev->features |= NETIF_F_TSO;
				break;
			}
		}
2432 /* enable transmits in the hardware, need to do this
2433 * after setting TARC0 */
2434 tctl = E1000_READ_REG(&adapter->hw, TCTL);
2435 tctl |= E1000_TCTL_EN;
2436 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
2438 netif_carrier_on(netdev);
2439 netif_wake_queue(netdev);
2440 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2441 adapter->smartspeed = 0;
2444 if (netif_carrier_ok(netdev)) {
2445 adapter->link_speed = 0;
2446 adapter->link_duplex = 0;
2447 DPRINTK(LINK, INFO, "NIC Link is Down\n");
2448 netif_carrier_off(netdev);
2449 netif_stop_queue(netdev);
2450 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2452 /* 80003ES2LAN workaround--
2453 * For packet buffer work-around on link down event;
2454 * disable receives in the ISR and
2455 * reset device here in the watchdog
2457 if (adapter->hw.mac_type == e1000_80003es2lan)
2459 schedule_work(&adapter->reset_task);
2462 e1000_smartspeed(adapter);
2465 e1000_update_stats(adapter);
2467 adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2468 adapter->tpt_old = adapter->stats.tpt;
2469 adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
2470 adapter->colc_old = adapter->stats.colc;
2472 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2473 adapter->gorcl_old = adapter->stats.gorcl;
2474 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2475 adapter->gotcl_old = adapter->stats.gotcl;
2477 e1000_update_adaptive(&adapter->hw);
2479 if (!netif_carrier_ok(netdev)) {
2480 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2481 /* We've lost link, so the controller stops DMA,
2482 * but we've got queued Tx work that's never going
2483 * to get done, so reset controller to flush Tx.
2484 * (Do the reset outside of interrupt context). */
2485 adapter->tx_timeout_count++;
2486 schedule_work(&adapter->reset_task);
2490 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
2491 if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
2492 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
2493 * asymmetrical Tx or Rx gets ITR=8000; everyone
2494 * else is between 2000-8000. */
2495 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
2496 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
2497 adapter->gotcl - adapter->gorcl :
2498 adapter->gorcl - adapter->gotcl) / 10000;
2499 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2500 E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
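		/* Worked example of the heuristic above (illustrative
		 * numbers, not driver state): gotcl = 30000 and
		 * gorcl = 10000 give goc = 4 and dif = 2, so
		 * itr = 2 * 6000 / 4 + 2000 = 5000 interrupts/sec.
		 * The ITR register holds the minimum inter-interrupt
		 * interval in 256 ns units, hence the write of
		 * 1000000000 / (5000 * 256) ~= 781. */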
2503 /* Cause software interrupt to ensure rx ring is cleaned */
2504 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
2506 /* Force detection of hung controller every watchdog period */
2507 adapter->detect_tx_hung = TRUE;
2509 /* With 82571 controllers, LAA may be overwritten due to controller
2510 * reset from the other port. Set the appropriate LAA in RAR[0] */
2511 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2512 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2514 /* Reset the timer */
2515 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
2518 #define E1000_TX_FLAGS_CSUM 0x00000001
2519 #define E1000_TX_FLAGS_VLAN 0x00000002
2520 #define E1000_TX_FLAGS_TSO 0x00000004
2521 #define E1000_TX_FLAGS_IPV4 0x00000008
2522 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2523 #define E1000_TX_FLAGS_VLAN_SHIFT 16
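/* Example of the packing implied by the flags above (illustrative): a
 * VLAN tag of 0x0123 travels in the top 16 bits of tx_flags,
 *
 *	tx_flags |= E1000_TX_FLAGS_VLAN;
 *	tx_flags |= (0x0123 << E1000_TX_FLAGS_VLAN_SHIFT);
 *
 * and e1000_tx_queue() later copies (tx_flags & E1000_TX_FLAGS_VLAN_MASK)
 * straight into the descriptor's upper dword. */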
2526 e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2527 struct sk_buff *skb)
2530 struct e1000_context_desc *context_desc;
2531 struct e1000_buffer *buffer_info;
2533 uint32_t cmd_length = 0;
2534 uint16_t ipcse = 0, tucse, mss;
2535 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2538 if (skb_is_gso(skb)) {
2539 if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
2545 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
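		/* For a plain Ethernet/IPv4/TCP frame with no options this
		 * works out to hdr_len = 14 + 20 + 20 = 54: skb->h.raw -
		 * skb->data is the MAC-plus-IP header length, and doff (the
		 * TCP header length in 32-bit words) << 2 converts to bytes. */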
2546 mss = skb_shinfo(skb)->gso_size;
2547 if (skb->protocol == htons(ETH_P_IP)) {
2548 skb->nh.iph->tot_len = 0;
2549 skb->nh.iph->check = 0;
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0,
						   IPPROTO_TCP,
						   0);
2556 cmd_length = E1000_TXD_CMD_IP;
2557 ipcse = skb->h.raw - skb->data - 1;
2558 #ifdef NETIF_F_TSO_IPV6
2559 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2560 skb->nh.ipv6h->payload_len = 0;
			skb->h.th->check =
				~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
						 &skb->nh.ipv6h->daddr,
						 0, IPPROTO_TCP, 0);
			ipcse = 0;
#endif
		}
2570 ipcss = skb->nh.raw - skb->data;
2571 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
2572 tucss = skb->h.raw - skb->data;
2573 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
2576 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2577 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2579 i = tx_ring->next_to_use;
2580 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2581 buffer_info = &tx_ring->buffer_info[i];
2583 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2584 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2585 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2586 context_desc->upper_setup.tcp_fields.tucss = tucss;
2587 context_desc->upper_setup.tcp_fields.tucso = tucso;
2588 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2589 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2590 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2591 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2593 buffer_info->time_stamp = jiffies;
2595 if (++i == tx_ring->count) i = 0;
2596 tx_ring->next_to_use = i;
2606 e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2607 struct sk_buff *skb)
2609 struct e1000_context_desc *context_desc;
2610 struct e1000_buffer *buffer_info;
2614 if (likely(skb->ip_summed == CHECKSUM_HW)) {
2615 css = skb->h.raw - skb->data;
2617 i = tx_ring->next_to_use;
2618 buffer_info = &tx_ring->buffer_info[i];
2619 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2621 context_desc->upper_setup.tcp_fields.tucss = css;
2622 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
2623 context_desc->upper_setup.tcp_fields.tucse = 0;
2624 context_desc->tcp_seg_setup.data = 0;
2625 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2627 buffer_info->time_stamp = jiffies;
2629 if (unlikely(++i == tx_ring->count)) i = 0;
2630 tx_ring->next_to_use = i;
2638 #define E1000_MAX_TXD_PWR 12
2639 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2642 e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2643 struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
2644 unsigned int nr_frags, unsigned int mss)
2646 struct e1000_buffer *buffer_info;
2647 unsigned int len = skb->len;
2648 unsigned int offset = 0, size, count = 0, i;
2650 len -= skb->data_len;
2652 i = tx_ring->next_to_use;
2655 buffer_info = &tx_ring->buffer_info[i];
2656 size = min(len, max_per_txd);
2658 /* Workaround for Controller erratum --
2659 * descriptor for non-tso packet in a linear SKB that follows a
2660 * tso gets written back prematurely before the data is fully
2661 * DMA'd to the controller */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = 0;
			size -= 4;
		}
2668 /* Workaround for premature desc write-backs
2669 * in TSO mode. Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
2673 /* work-around for errata 10 and it applies
2674 * to all controllers in PCI-X mode
2675 * The fix is to make sure that the first descriptor of a
2676 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2678 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
		             (size > 2015) && count == 0))
			size = 2015;
2682 /* Workaround for potential 82544 hang in PCI-X. Avoid
2683 * terminating buffers within evenly-aligned dwords. */
2684 if (unlikely(adapter->pcix_82544 &&
		             !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		             size > 4))
			size -= 4;
2689 buffer_info->length = size;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				       skb->data + offset,
				       size,
				       PCI_DMA_TODEVICE);
2695 buffer_info->time_stamp = jiffies;
2700 if (unlikely(++i == tx_ring->count)) i = 0;
2703 for (f = 0; f < nr_frags; f++) {
2704 struct skb_frag_struct *frag;
2706 frag = &skb_shinfo(skb)->frags[f];
2708 offset = frag->page_offset;
2711 buffer_info = &tx_ring->buffer_info[i];
2712 size = min(len, max_per_txd);
2714 /* Workaround for premature desc write-backs
2715 * in TSO mode. Append 4-byte sentinel desc */
			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
				size -= 4;
2719 /* Workaround for potential 82544 hang in PCI-X.
2720 * Avoid terminating buffers within evenly-aligned
2722 if (unlikely(adapter->pcix_82544 &&
			             !((unsigned long)(frag->page+offset+size-1) & 4) &&
			             size > 4))
				size -= 4;
2727 buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					     frag->page,
					     offset,
					     size,
					     PCI_DMA_TODEVICE);
2734 buffer_info->time_stamp = jiffies;
2739 if (unlikely(++i == tx_ring->count)) i = 0;
2743 i = (i == 0) ? tx_ring->count - 1 : i - 1;
2744 tx_ring->buffer_info[i].skb = skb;
2745 tx_ring->buffer_info[first].next_to_watch = i;
2751 e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2752 int tx_flags, int count)
2754 struct e1000_tx_desc *tx_desc = NULL;
2755 struct e1000_buffer *buffer_info;
2756 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2759 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2760 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2762 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2764 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2765 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2768 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2769 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2770 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2773 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2774 txd_lower |= E1000_TXD_CMD_VLE;
2775 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2778 i = tx_ring->next_to_use;
2781 buffer_info = &tx_ring->buffer_info[i];
2782 tx_desc = E1000_TX_DESC(*tx_ring, i);
2783 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2784 tx_desc->lower.data =
2785 cpu_to_le32(txd_lower | buffer_info->length);
2786 tx_desc->upper.data = cpu_to_le32(txd_upper);
2787 if (unlikely(++i == tx_ring->count)) i = 0;
2790 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
2792 /* Force memory writes to complete before letting h/w
2793 * know there are new descriptors to fetch. (Only
2794 * applicable for weak-ordered memory model archs,
2795 * such as IA-64). */
2798 tx_ring->next_to_use = i;
2799 writel(i, adapter->hw.hw_addr + tx_ring->tdt);
2803 * 82547 workaround to avoid controller hang in half-duplex environment.
2804 * The workaround is to avoid queuing a large packet that would span
2805 * the internal Tx FIFO ring boundary by notifying the stack to resend
2806 * the packet at a later time. This gives the Tx FIFO an opportunity to
2807 * flush all packets. When that occurs, we reset the Tx FIFO pointers
2808 * to the beginning of the Tx FIFO.
2811 #define E1000_FIFO_HDR 0x10
2812 #define E1000_82547_PAD_LEN 0x3E0
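/* Illustrative arithmetic for the check in the function below (the
 * numbers are hypothetical, not hardware state): with
 * tx_fifo_size = 0x4000 and tx_fifo_head = 0x3E00, fifo_space is 0x200.
 * A 1514-byte skb becomes skb_fifo_len = 1514 + E1000_FIFO_HDR rounded
 * up to a 16-byte multiple = 0x600, which is >= E1000_82547_PAD_LEN +
 * fifo_space (0x3E0 + 0x200 = 0x5E0), so the packet could wrap the FIFO
 * boundary and a stall is requested instead. */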
2815 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
2817 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2818 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
2820 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
2822 if (adapter->link_duplex != HALF_DUPLEX)
2823 goto no_fifo_stall_required;
2825 if (atomic_read(&adapter->tx_fifo_stall))
2828 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
2829 atomic_set(&adapter->tx_fifo_stall, 1);
2833 no_fifo_stall_required:
2834 adapter->tx_fifo_head += skb_fifo_len;
2835 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
2836 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2840 #define MINIMUM_DHCP_PACKET_SIZE 282
2842 e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2844 struct e1000_hw *hw = &adapter->hw;
2845 uint16_t length, offset;
2846 if (vlan_tx_tag_present(skb)) {
2847 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2848 ( adapter->hw.mng_cookie.status &
2849 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2852 if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
2853 struct ethhdr *eth = (struct ethhdr *) skb->data;
2854 if ((htons(ETH_P_IP) == eth->h_proto)) {
2855 const struct iphdr *ip =
2856 (struct iphdr *)((uint8_t *)skb->data+14);
2857 if (IPPROTO_UDP == ip->protocol) {
2858 struct udphdr *udp =
2859 (struct udphdr *)((uint8_t *)ip +
2861 if (ntohs(udp->dest) == 67) {
2862 offset = (uint8_t *)udp + 8 - skb->data;
2863 length = skb->len - offset;
2865 return e1000_mng_write_dhcp_info(hw,
2875 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
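/* Worked example of TXD_USE_COUNT (illustrative): with
 * X = E1000_MAX_TXD_PWR = 12, a 9000-byte jumbo buffer needs
 * TXD_USE_COUNT(9000, 12) = (9000 >> 12) + 1 = 3 descriptors of up to
 * 4096 bytes each; the macro rounds up, and over-counts by one
 * descriptor when S is an exact multiple of 1 << X. */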
2877 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2879 struct e1000_adapter *adapter = netdev_priv(netdev);
2880 struct e1000_tx_ring *tx_ring;
2881 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
2882 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
2883 unsigned int tx_flags = 0;
2884 unsigned int len = skb->len;
2885 unsigned long flags;
2886 unsigned int nr_frags = 0;
2887 unsigned int mss = 0;
2891 len -= skb->data_len;
2893 tx_ring = adapter->tx_ring;
2895 if (unlikely(skb->len <= 0)) {
2896 dev_kfree_skb_any(skb);
2897 return NETDEV_TX_OK;
2901 mss = skb_shinfo(skb)->gso_size;
2902 /* The controller does a simple calculation to
2903 * make sure there is enough room in the FIFO before
2904 * initiating the DMA for each buffer. The calc is:
2905 * 4 = ceil(buffer len/mss). To make sure we don't
		 * overrun the FIFO, adjust the max buffer len if mss
		 * drops. */
2910 max_per_txd = min(mss << 2, max_per_txd);
2911 max_txd_pwr = fls(max_per_txd) - 1;
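		/* Example of the clamp above (illustrative mss values):
		 * with mss = 536, mss << 2 = 2144 becomes the new
		 * max_per_txd and fls(2144) - 1 = 11, so TXD_USE_COUNT()
		 * below sizes descriptors in 2048-byte steps; the common
		 * mss = 1460 gives 5840, leaving the 4096-byte
		 * E1000_MAX_DATA_PER_TXD cap in effect. */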
2913 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
2914 * points to just header, pull a few bytes of payload from
2915 * frags into skb->data */
2916 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2917 if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
2918 switch (adapter->hw.mac_type) {
2919 unsigned int pull_size;
2924 pull_size = min((unsigned int)4, skb->data_len);
2925 if (!__pskb_pull_tail(skb, pull_size)) {
2927 "__pskb_pull_tail failed.\n");
2928 dev_kfree_skb_any(skb);
2929 return NETDEV_TX_OK;
2931 len = skb->len - skb->data_len;
2940 /* reserve a descriptor for the offload context */
2941 if ((mss) || (skb->ip_summed == CHECKSUM_HW))
2945 if (skb->ip_summed == CHECKSUM_HW)
2950 /* Controller Erratum workaround */
2951 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
2955 count += TXD_USE_COUNT(len, max_txd_pwr);
2957 if (adapter->pcix_82544)
2960 /* work-around for errata 10 and it applies to all controllers
2961 * in PCI-X mode, so add one more descriptor to the count
2963 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2967 nr_frags = skb_shinfo(skb)->nr_frags;
2968 for (f = 0; f < nr_frags; f++)
2969 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
2971 if (adapter->pcix_82544)
2975 if (adapter->hw.tx_pkt_filtering &&
2976 (adapter->hw.mac_type == e1000_82573))
2977 e1000_transfer_dhcp_info(adapter, skb);
2979 local_irq_save(flags);
2980 if (!spin_trylock(&tx_ring->tx_lock)) {
2981 /* Collision - tell upper layer to requeue */
2982 local_irq_restore(flags);
2983 return NETDEV_TX_LOCKED;
2986 /* need: count + 2 desc gap to keep tail from touching
2987 * head, otherwise try next time */
2988 if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
2989 netif_stop_queue(netdev);
2990 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2991 return NETDEV_TX_BUSY;
2994 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
2995 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2996 netif_stop_queue(netdev);
2997 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2998 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2999 return NETDEV_TX_BUSY;
3003 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
3004 tx_flags |= E1000_TX_FLAGS_VLAN;
3005 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3008 first = tx_ring->next_to_use;
3010 tso = e1000_tso(adapter, tx_ring, skb);
3012 dev_kfree_skb_any(skb);
3013 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3014 return NETDEV_TX_OK;
3018 tx_ring->last_tx_tso = 1;
3019 tx_flags |= E1000_TX_FLAGS_TSO;
3020 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3021 tx_flags |= E1000_TX_FLAGS_CSUM;
	/* The old method was to assume an IPv4 packet by default if TSO
	 * was enabled.  82571 hardware supports TSO for IPv6 as well, so
	 * we can no longer assume; we must check the protocol. */
3026 if (likely(skb->protocol == htons(ETH_P_IP)))
3027 tx_flags |= E1000_TX_FLAGS_IPV4;
3029 e1000_tx_queue(adapter, tx_ring, tx_flags,
3030 e1000_tx_map(adapter, tx_ring, skb, first,
3031 max_per_txd, nr_frags, mss));
3033 netdev->trans_start = jiffies;
3035 /* Make sure there is space in the ring for the next send. */
3036 if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
3037 netif_stop_queue(netdev);
3039 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3040 return NETDEV_TX_OK;
3044 * e1000_tx_timeout - Respond to a Tx Hang
3045 * @netdev: network interface device structure
3049 e1000_tx_timeout(struct net_device *netdev)
3051 struct e1000_adapter *adapter = netdev_priv(netdev);
3053 /* Do the reset outside of interrupt context */
3054 adapter->tx_timeout_count++;
3055 schedule_work(&adapter->reset_task);
3059 e1000_reset_task(struct net_device *netdev)
3061 struct e1000_adapter *adapter = netdev_priv(netdev);
3063 e1000_reinit_locked(adapter);
3067 * e1000_get_stats - Get System Network Statistics
3068 * @netdev: network interface device structure
3070 * Returns the address of the device statistics structure.
3071 * The statistics are actually updated from the timer callback.
3074 static struct net_device_stats *
3075 e1000_get_stats(struct net_device *netdev)
3077 struct e1000_adapter *adapter = netdev_priv(netdev);
3079 /* only return the current stats */
3080 return &adapter->net_stats;
3084 * e1000_change_mtu - Change the Maximum Transfer Unit
3085 * @netdev: network interface device structure
3086 * @new_mtu: new value for maximum frame size
3088 * Returns 0 on success, negative on failure
3092 e1000_change_mtu(struct net_device *netdev, int new_mtu)
3094 struct e1000_adapter *adapter = netdev_priv(netdev);
3095 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3096 uint16_t eeprom_data = 0;
3098 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3099 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
		return -EINVAL;
	}
3104 /* Adapter-specific max frame size limits. */
3105 switch (adapter->hw.mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
	case e1000_ich8lan:
		if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	case e1000_82573:
3114 /* only enable jumbo frames if ASPM is disabled completely
3115 * this means both bits must be zero in 0x1A bits 3:2 */
3116 e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
3118 if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
3119 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3121 "Jumbo Frames not supported.\n");
3126 /* fall through to get support */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
3130 #define MAX_STD_JUMBO_FRAME_SIZE 9234
3131 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}
	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size
	 * i.e. RXBUFFER_2048 --> size-4096 slab */
3146 if (max_frame <= E1000_RXBUFFER_256)
3147 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3148 else if (max_frame <= E1000_RXBUFFER_512)
3149 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3150 else if (max_frame <= E1000_RXBUFFER_1024)
3151 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3152 else if (max_frame <= E1000_RXBUFFER_2048)
3153 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3154 else if (max_frame <= E1000_RXBUFFER_4096)
3155 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
3156 else if (max_frame <= E1000_RXBUFFER_8192)
3157 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
3158 else if (max_frame <= E1000_RXBUFFER_16384)
3159 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3161 /* adjust allocation if LPE protects us, and we aren't using SBP */
3162 if (!adapter->hw.tbi_compatibility_on &&
3163 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
3164 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3165 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3167 netdev->mtu = new_mtu;
3169 if (netif_running(netdev))
3170 e1000_reinit_locked(adapter);
3172 adapter->hw.max_frame_size = max_frame;
3178 * e1000_update_stats - Update the board statistics counters
3179 * @adapter: board private structure
3183 e1000_update_stats(struct e1000_adapter *adapter)
3185 struct e1000_hw *hw = &adapter->hw;
3186 struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	uint16_t phy_tmp;
3190 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3193 * Prevent stats update while adapter is being reset, or if the pci
3194 * connection is down.
3196 if (adapter->link_speed == 0)
3198 if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
3201 spin_lock_irqsave(&adapter->stats_lock, flags);
3203 /* these counters are modified from e1000_adjust_tbi_stats,
3204 * called from the interrupt context, so they must only
3205 * be written while holding adapter->stats_lock
3208 adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
3209 adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
3210 adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
3211 adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
3212 adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
3213 adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
3214 adapter->stats.roc += E1000_READ_REG(hw, ROC);
3216 if (adapter->hw.mac_type != e1000_ich8lan) {
3217 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
3218 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
3219 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
3220 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
3221 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
3222 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
3225 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
3226 adapter->stats.mpc += E1000_READ_REG(hw, MPC);
3227 adapter->stats.scc += E1000_READ_REG(hw, SCC);
3228 adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
3229 adapter->stats.mcc += E1000_READ_REG(hw, MCC);
3230 adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
3231 adapter->stats.dc += E1000_READ_REG(hw, DC);
3232 adapter->stats.sec += E1000_READ_REG(hw, SEC);
3233 adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
3234 adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
3235 adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
3236 adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
3237 adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
3238 adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
3239 adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
3240 adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
3241 adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
3242 adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
3243 adapter->stats.ruc += E1000_READ_REG(hw, RUC);
3244 adapter->stats.rfc += E1000_READ_REG(hw, RFC);
3245 adapter->stats.rjc += E1000_READ_REG(hw, RJC);
3246 adapter->stats.torl += E1000_READ_REG(hw, TORL);
3247 adapter->stats.torh += E1000_READ_REG(hw, TORH);
3248 adapter->stats.totl += E1000_READ_REG(hw, TOTL);
3249 adapter->stats.toth += E1000_READ_REG(hw, TOTH);
3250 adapter->stats.tpr += E1000_READ_REG(hw, TPR);
3252 if (adapter->hw.mac_type != e1000_ich8lan) {
3253 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
3254 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
3255 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
3256 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
3257 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
3258 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
3261 adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
3262 adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
3264 /* used for adaptive IFS */
3266 hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
3267 adapter->stats.tpt += hw->tx_packet_delta;
3268 hw->collision_delta = E1000_READ_REG(hw, COLC);
3269 adapter->stats.colc += hw->collision_delta;
3271 if (hw->mac_type >= e1000_82543) {
3272 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3273 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3274 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
3275 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
3276 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
3277 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
3279 if (hw->mac_type > e1000_82547_rev_2) {
3280 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3281 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
3283 if (adapter->hw.mac_type != e1000_ich8lan) {
3284 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
3285 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
3286 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
3287 adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
3288 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
3289 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
3290 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
3294 /* Fill out the OS statistics structure */
3296 adapter->net_stats.rx_packets = adapter->stats.gprc;
3297 adapter->net_stats.tx_packets = adapter->stats.gptc;
3298 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
3299 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
3300 adapter->net_stats.multicast = adapter->stats.mprc;
3301 adapter->net_stats.collisions = adapter->stats.colc;
3305 /* RLEC on some newer hardware can be incorrect so build
3306 * our own version based on RUC and ROC */
3307 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3308 adapter->stats.crcerrs + adapter->stats.algnerrc +
3309 adapter->stats.ruc + adapter->stats.roc +
3310 adapter->stats.cexterr;
3311 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
3313 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3314 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3315 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3319 adapter->net_stats.tx_errors = adapter->stats.ecol +
3320 adapter->stats.latecol;
3321 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3322 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3323 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3325 /* Tx Dropped needs to be maintained elsewhere */
3329 if (hw->media_type == e1000_media_type_copper) {
3330 if ((adapter->link_speed == SPEED_1000) &&
3331 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3332 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3333 adapter->phy_stats.idle_errors += phy_tmp;
3336 if ((hw->mac_type <= e1000_82546) &&
3337 (hw->phy_type == e1000_phy_m88) &&
3338 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3339 adapter->phy_stats.receive_errors += phy_tmp;
3342 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3346 * e1000_intr - Interrupt Handler
3347 * @irq: interrupt number
3348 * @data: pointer to a network interface device structure
3349 * @pt_regs: CPU registers structure
3353 e1000_intr(int irq, void *data, struct pt_regs *regs)
3355 struct net_device *netdev = data;
3356 struct e1000_adapter *adapter = netdev_priv(netdev);
3357 struct e1000_hw *hw = &adapter->hw;
3358 uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
3359 #ifndef CONFIG_E1000_NAPI
3362 /* Interrupt Auto-Mask...upon reading ICR,
3363 * interrupts are masked. No need for the
3364 * IMC write, but it does mean we should
3365 * account for it ASAP. */
3366 if (likely(hw->mac_type >= e1000_82571))
3367 atomic_inc(&adapter->irq_sem);
3370 if (unlikely(!icr)) {
3371 #ifdef CONFIG_E1000_NAPI
3372 if (hw->mac_type >= e1000_82571)
3373 e1000_irq_enable(adapter);
3375 return IRQ_NONE; /* Not our interrupt */
3378 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3379 hw->get_link_status = 1;
3380 /* 80003ES2LAN workaround--
3381 * For packet buffer work-around on link down event;
3382 * disable receives here in the ISR and
3383 * reset adapter in watchdog
3385 if (netif_carrier_ok(netdev) &&
3386 (adapter->hw.mac_type == e1000_80003es2lan)) {
3387 /* disable receives */
3388 rctl = E1000_READ_REG(hw, RCTL);
3389 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3391 mod_timer(&adapter->watchdog_timer, jiffies);
3394 #ifdef CONFIG_E1000_NAPI
3395 if (unlikely(hw->mac_type < e1000_82571)) {
3396 atomic_inc(&adapter->irq_sem);
3397 E1000_WRITE_REG(hw, IMC, ~0);
3398 E1000_WRITE_FLUSH(hw);
3400 if (likely(netif_rx_schedule_prep(netdev)))
3401 __netif_rx_schedule(netdev);
3403 e1000_irq_enable(adapter);
3405 /* Writing IMC and IMS is needed for 82547.
3406 * Due to Hub Link bus being occupied, an interrupt
3407 * de-assertion message is not able to be sent.
3408 * When an interrupt assertion message is generated later,
3409 * two messages are re-ordered and sent out.
3410 * That causes APIC to think 82547 is in de-assertion
	 * state, while 82547 is in assertion state, resulting
	 * in a deadlock.  Writing IMC forces 82547 into
3413 * de-assertion state.
3415 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
3416 atomic_inc(&adapter->irq_sem);
3417 E1000_WRITE_REG(hw, IMC, ~0);
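	/* Note: the bitwise '&' in the loop condition below is deliberate,
	 * so that both the Rx and Tx cleanup routines run on every pass; a
	 * short-circuiting '&&' would skip the Tx cleanup whenever Rx work
	 * remained.  The loop exits early once neither side reports more
	 * work. */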
3420 for (i = 0; i < E1000_MAX_INTR; i++)
3421 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3422 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3425 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3426 e1000_irq_enable(adapter);
3433 #ifdef CONFIG_E1000_NAPI
3435 * e1000_clean - NAPI Rx polling callback
3436 * @adapter: board private structure
3440 e1000_clean(struct net_device *poll_dev, int *budget)
3442 struct e1000_adapter *adapter;
3443 int work_to_do = min(*budget, poll_dev->quota);
3444 int tx_cleaned = 0, work_done = 0;
3446 /* Must NOT use netdev_priv macro here. */
3447 adapter = poll_dev->priv;
3449 /* Keep link state information with original netdev */
3450 if (!netif_carrier_ok(poll_dev))
3453 /* e1000_clean is called per-cpu. This lock protects
3454 * tx_ring[0] from being cleaned by multiple cpus
3455 * simultaneously. A failure obtaining the lock means
3456 * tx_ring[0] is currently being cleaned anyway. */
3457 if (spin_trylock(&adapter->tx_queue_lock)) {
3458 tx_cleaned = e1000_clean_tx_irq(adapter,
3459 &adapter->tx_ring[0]);
3460 spin_unlock(&adapter->tx_queue_lock);
3463 adapter->clean_rx(adapter, &adapter->rx_ring[0],
3464 &work_done, work_to_do);
3466 *budget -= work_done;
3467 poll_dev->quota -= work_done;
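	/* Under this NAPI API both the global *budget and the per-device
	 * quota are charged for the Rx work completed; returning nonzero
	 * below asks the core to keep polling, zero ends the polling
	 * round. */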
3469 /* If no Tx and not enough Rx work done, exit the polling mode */
3470 if ((!tx_cleaned && (work_done == 0)) ||
3471 !netif_running(poll_dev)) {
3473 netif_rx_complete(poll_dev);
3474 e1000_irq_enable(adapter);
3483 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3484 * @adapter: board private structure
3488 e1000_clean_tx_irq(struct e1000_adapter *adapter,
3489 struct e1000_tx_ring *tx_ring)
3491 struct net_device *netdev = adapter->netdev;
3492 struct e1000_tx_desc *tx_desc, *eop_desc;
3493 struct e1000_buffer *buffer_info;
3494 unsigned int i, eop;
3495 #ifdef CONFIG_E1000_NAPI
3496 unsigned int count = 0;
3498 boolean_t cleaned = FALSE;
3500 i = tx_ring->next_to_clean;
3501 eop = tx_ring->buffer_info[i].next_to_watch;
3502 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3504 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3505 for (cleaned = FALSE; !cleaned; ) {
3506 tx_desc = E1000_TX_DESC(*tx_ring, i);
3507 buffer_info = &tx_ring->buffer_info[i];
3508 cleaned = (i == eop);
3510 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3511 memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
3513 if (unlikely(++i == tx_ring->count)) i = 0;
3517 eop = tx_ring->buffer_info[i].next_to_watch;
3518 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3519 #ifdef CONFIG_E1000_NAPI
3520 #define E1000_TX_WEIGHT 64
3521 /* weight of a sort for tx, to avoid endless transmit cleanup */
3522 if (count++ == E1000_TX_WEIGHT) break;
3526 tx_ring->next_to_clean = i;
3528 #define TX_WAKE_THRESHOLD 32
3529 if (unlikely(cleaned && netif_queue_stopped(netdev) &&
3530 netif_carrier_ok(netdev))) {
3531 spin_lock(&tx_ring->tx_lock);
3532 if (netif_queue_stopped(netdev) &&
3533 (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
3534 netif_wake_queue(netdev);
3535 spin_unlock(&tx_ring->tx_lock);
3538 if (adapter->detect_tx_hung) {
3539 /* Detect a transmit hang in hardware, this serializes the
3540 * check with the clearing of time_stamp and movement of i */
3541 adapter->detect_tx_hung = FALSE;
3542 if (tx_ring->buffer_info[eop].dma &&
3543 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3544 (adapter->tx_timeout_factor * HZ))
3545 && !(E1000_READ_REG(&adapter->hw, STATUS) &
3546 E1000_STATUS_TXOFF)) {
3548 /* detected Tx unit hang */
			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
					"  Tx Queue             <%lu>\n"
					"  TDH                  <%x>\n"
					"  TDT                  <%x>\n"
					"  next_to_use          <%x>\n"
					"  next_to_clean        <%x>\n"
					"buffer_info[next_to_clean]\n"
					"  time_stamp           <%lx>\n"
					"  next_to_watch        <%x>\n"
					"  jiffies              <%lx>\n"
					"  next_to_watch.status <%x>\n",
				(unsigned long)((tx_ring - adapter->tx_ring) /
					sizeof(struct e1000_tx_ring)),
				readl(adapter->hw.hw_addr + tx_ring->tdh),
				readl(adapter->hw.hw_addr + tx_ring->tdt),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->upper.fields.status);
3570 netif_stop_queue(netdev);
3577 * e1000_rx_checksum - Receive Checksum Offload for 82543
3578 * @adapter: board private structure
3579 * @status_err: receive descriptor status and error fields
3580 * @csum: receive descriptor csum field
3581 * @sk_buff: socket buffer with received data
3585 e1000_rx_checksum(struct e1000_adapter *adapter,
3586 uint32_t status_err, uint32_t csum,
3587 struct sk_buff *skb)
3589 uint16_t status = (uint16_t)status_err;
3590 uint8_t errors = (uint8_t)(status_err >> 24);
3591 skb->ip_summed = CHECKSUM_NONE;
3593 /* 82543 or newer only */
3594 if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
3595 /* Ignore Checksum bit is set */
3596 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3597 /* TCP/UDP checksum error bit is set */
3598 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3599 /* let the stack verify checksum errors */
3600 adapter->hw_csum_err++;
3603 /* TCP/UDP Checksum has not been calculated */
3604 if (adapter->hw.mac_type <= e1000_82547_rev_2) {
3605 if (!(status & E1000_RXD_STAT_TCPCS))
3608 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
3611 /* It must be a TCP or UDP packet with a valid checksum */
3612 if (likely(status & E1000_RXD_STAT_TCPCS)) {
3613 /* TCP checksum is good */
3614 skb->ip_summed = CHECKSUM_UNNECESSARY;
3615 } else if (adapter->hw.mac_type > e1000_82547_rev_2) {
3616 /* IP fragment with UDP payload */
3617 /* Hardware complements the payload checksum, so we undo it
3618 * and then put the value in host order for further stack use.
		csum = ntohl(csum ^ 0xFFFF);
		skb->csum = csum;
3622 skb->ip_summed = CHECKSUM_HW;
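		/* Illustrative example: if the true payload sum is 0x1234
		 * the hardware reports 0xEDCB; the XOR with 0xFFFF above
		 * restores 0x1234 before the byte-order fixup. */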
3624 adapter->hw_csum_good++;
3628 * e1000_clean_rx_irq - Send received data up the network stack; legacy
3629 * @adapter: board private structure
3633 #ifdef CONFIG_E1000_NAPI
3634 e1000_clean_rx_irq(struct e1000_adapter *adapter,
3635 struct e1000_rx_ring *rx_ring,
3636 int *work_done, int work_to_do)
3638 e1000_clean_rx_irq(struct e1000_adapter *adapter,
3639 struct e1000_rx_ring *rx_ring)
3642 struct net_device *netdev = adapter->netdev;
3643 struct pci_dev *pdev = adapter->pdev;
3644 struct e1000_rx_desc *rx_desc, *next_rxd;
3645 struct e1000_buffer *buffer_info, *next_buffer;
3646 unsigned long flags;
3650 int cleaned_count = 0;
3651 boolean_t cleaned = FALSE;
3653 i = rx_ring->next_to_clean;
3654 rx_desc = E1000_RX_DESC(*rx_ring, i);
3655 buffer_info = &rx_ring->buffer_info[i];
3657 while (rx_desc->status & E1000_RXD_STAT_DD) {
3658 struct sk_buff *skb;
3660 #ifdef CONFIG_E1000_NAPI
3661 if (*work_done >= work_to_do)
3665 status = rx_desc->status;
3666 skb = buffer_info->skb;
3667 buffer_info->skb = NULL;
3669 prefetch(skb->data - NET_IP_ALIGN);
3671 if (++i == rx_ring->count) i = 0;
3672 next_rxd = E1000_RX_DESC(*rx_ring, i);
3675 next_buffer = &rx_ring->buffer_info[i];
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);
3684 length = le16_to_cpu(rx_desc->length);
3686 /* adjust length to remove Ethernet CRC */
3689 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
3690 /* All receives must fit into a single buffer */
3691 E1000_DBG("%s: Receive packet consumed multiple"
3692 " buffers\n", netdev->name);
3694 buffer_info->skb = skb;
3698 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
3699 last_byte = *(skb->data + length - 1);
3700 if (TBI_ACCEPT(&adapter->hw, status,
3701 rx_desc->errors, length, last_byte)) {
3702 spin_lock_irqsave(&adapter->stats_lock, flags);
3703 e1000_tbi_adjust_stats(&adapter->hw,
3706 spin_unlock_irqrestore(&adapter->stats_lock,
3711 buffer_info->skb = skb;
3716 /* code added for copybreak, this should improve
3717 * performance for small packets with large amounts
3718 * of reassembly being done in the stack */
3719 #define E1000_CB_LENGTH 256
3720 if (length < E1000_CB_LENGTH) {
3721 struct sk_buff *new_skb =
				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
3724 skb_reserve(new_skb, NET_IP_ALIGN);
3725 new_skb->dev = netdev;
3726 memcpy(new_skb->data - NET_IP_ALIGN,
3727 skb->data - NET_IP_ALIGN,
3728 length + NET_IP_ALIGN);
3729 /* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
				skb_put(skb, length);
3735 skb_put(skb, length);
3737 /* end copybreak code */
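		/* The copybreak above trades a short memcpy for a buffer
		 * recycle: frames under E1000_CB_LENGTH are copied into a
		 * right-sized skb while the original full-size receive
		 * buffer is kept in buffer_info->skb, sparing the refill
		 * path an allocation and a DMA mapping for the common
		 * small-packet case. */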
3739 /* Receive Checksum Offload */
3740 e1000_rx_checksum(adapter,
3741 (uint32_t)(status) |
3742 ((uint32_t)(rx_desc->errors) << 24),
3743 le16_to_cpu(rx_desc->csum), skb);
3745 skb->protocol = eth_type_trans(skb, netdev);
3746 #ifdef CONFIG_E1000_NAPI
3747 if (unlikely(adapter->vlgrp &&
3748 (status & E1000_RXD_STAT_VP))) {
3749 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3750 le16_to_cpu(rx_desc->special) &
3751 E1000_RXD_SPC_VLAN_MASK);
3753 netif_receive_skb(skb);
3755 #else /* CONFIG_E1000_NAPI */
3756 if (unlikely(adapter->vlgrp &&
3757 (status & E1000_RXD_STAT_VP))) {
3758 vlan_hwaccel_rx(skb, adapter->vlgrp,
3759 le16_to_cpu(rx_desc->special) &
3760 E1000_RXD_SPC_VLAN_MASK);
3764 #endif /* CONFIG_E1000_NAPI */
3765 netdev->last_rx = jiffies;
3768 rx_desc->status = 0;
3770 /* return some buffers to hardware, one at a time is too slow */
3771 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3772 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3776 /* use prefetched values */
3778 buffer_info = next_buffer;
3780 rx_ring->next_to_clean = i;
3782 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3784 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3790 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
3791 * @adapter: board private structure
3795 #ifdef CONFIG_E1000_NAPI
3796 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3797 struct e1000_rx_ring *rx_ring,
3798 int *work_done, int work_to_do)
3800 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3801 struct e1000_rx_ring *rx_ring)
3804 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
3805 struct net_device *netdev = adapter->netdev;
3806 struct pci_dev *pdev = adapter->pdev;
3807 struct e1000_buffer *buffer_info, *next_buffer;
3808 struct e1000_ps_page *ps_page;
3809 struct e1000_ps_page_dma *ps_page_dma;
3810 struct sk_buff *skb;
3812 uint32_t length, staterr;
3813 int cleaned_count = 0;
3814 boolean_t cleaned = FALSE;
3816 i = rx_ring->next_to_clean;
3817 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3818 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3819 buffer_info = &rx_ring->buffer_info[i];
3821 while (staterr & E1000_RXD_STAT_DD) {
3822 ps_page = &rx_ring->ps_page[i];
3823 ps_page_dma = &rx_ring->ps_page_dma[i];
3824 #ifdef CONFIG_E1000_NAPI
3825 if (unlikely(*work_done >= work_to_do))
3829 skb = buffer_info->skb;
3831 /* in the packet split case this is header only */
3832 prefetch(skb->data - NET_IP_ALIGN);
3834 if (++i == rx_ring->count) i = 0;
3835 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
3838 next_buffer = &rx_ring->buffer_info[i];
3842 pci_unmap_single(pdev, buffer_info->dma,
3843 buffer_info->length,
3844 PCI_DMA_FROMDEVICE);
3846 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3847 E1000_DBG("%s: Packet Split buffers didn't pick up"
3848 " the full packet\n", netdev->name);
3849 dev_kfree_skb_irq(skb);
3853 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3854 dev_kfree_skb_irq(skb);
3858 length = le16_to_cpu(rx_desc->wb.middle.length0);
3860 if (unlikely(!length)) {
3861 E1000_DBG("%s: Last part of the packet spanning"
3862 " multiple descriptors\n", netdev->name);
3863 dev_kfree_skb_irq(skb);
3868 skb_put(skb, length);
		/* this looks ugly, but compiler issues seem to make it
		 * more efficient than reusing j */
3873 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
		/* page alloc/put takes too long and affects small packet
		 * throughput, so unsplit small packets and save the alloc/put */
3877 if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			/* there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long */
3882 pci_dma_sync_single_for_cpu(pdev,
						    ps_page_dma->ps_page_dma[0],
						    PAGE_SIZE,
3885 PCI_DMA_FROMDEVICE);
3886 vaddr = kmap_atomic(ps_page->ps_page[0],
3887 KM_SKB_DATA_SOFTIRQ);
3888 memcpy(skb->tail, vaddr, l1);
3889 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
3890 pci_dma_sync_single_for_device(pdev,
3891 ps_page_dma->ps_page_dma[0],
3892 PAGE_SIZE, PCI_DMA_FROMDEVICE);
			/* remove the CRC */
			l1 -= 4;
			skb_put(skb, l1);
			goto copydone;
		}
3900 for (j = 0; j < adapter->rx_ps_pages; j++) {
3901 if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
3903 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
3904 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3905 ps_page_dma->ps_page_dma[j] = 0;
3906 skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
3908 ps_page->ps_page[j] = NULL;
3910 skb->data_len += length;
3911 skb->truesize += length;
		/* strip the Ethernet CRC; the problem is we're using pages
		 * now, so this whole operation can get a little CPU
		 * intensive */
		pskb_trim(skb, skb->len - 4);

copydone:
3919 e1000_rx_checksum(adapter, staterr,
3920 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
3921 skb->protocol = eth_type_trans(skb, netdev);
3923 if (likely(rx_desc->wb.upper.header_status &
3924 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
3925 adapter->rx_hdr_split++;
3926 #ifdef CONFIG_E1000_NAPI
3927 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3928 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3929 le16_to_cpu(rx_desc->wb.middle.vlan) &
3930 E1000_RXD_SPC_VLAN_MASK);
3932 netif_receive_skb(skb);
3934 #else /* CONFIG_E1000_NAPI */
3935 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3936 vlan_hwaccel_rx(skb, adapter->vlgrp,
3937 le16_to_cpu(rx_desc->wb.middle.vlan) &
3938 E1000_RXD_SPC_VLAN_MASK);
3942 #endif /* CONFIG_E1000_NAPI */
3943 netdev->last_rx = jiffies;
3946 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
3947 buffer_info->skb = NULL;
3949 /* return some buffers to hardware, one at a time is too slow */
3950 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3951 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3955 /* use prefetched values */
3957 buffer_info = next_buffer;
3959 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3961 rx_ring->next_to_clean = i;
3963 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3965 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3971 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
3972 * @adapter: address of board private structure
3976 e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3977 struct e1000_rx_ring *rx_ring,
3980 struct net_device *netdev = adapter->netdev;
3981 struct pci_dev *pdev = adapter->pdev;
3982 struct e1000_rx_desc *rx_desc;
3983 struct e1000_buffer *buffer_info;
3984 struct sk_buff *skb;
3986 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
3988 i = rx_ring->next_to_use;
3989 buffer_info = &rx_ring->buffer_info[i];
3991 while (cleaned_count--) {
3992 if (!(skb = buffer_info->skb))
			skb = netdev_alloc_skb(netdev, bufsz);
		else {
			skb_trim(skb, 0);
			goto map_skb;
		}
3999 if (unlikely(!skb)) {
4000 /* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}
4005 /* Fix for errata 23, can't cross 64kB boundary */
4006 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4007 struct sk_buff *oldskb = skb;
4008 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
4009 "at %p\n", bufsz, skb->data);
4010 /* Try again, without freeing the previous */
4011 skb = netdev_alloc_skb(netdev, bufsz);
4012 /* Failed allocation, critical failure */
		if (!skb) {
			dev_kfree_skb(oldskb);
			break;
		}
4018 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				break; /* while !buffer_info->skb */
			}
4024 /* Use new allocation */
4025 dev_kfree_skb(oldskb);
4028 /* Make buffer alignment 2 beyond a 16 byte boundary
4029 * this will result in a 16 byte aligned IP header after
4030 * the 14 byte MAC header is removed
4032 skb_reserve(skb, NET_IP_ALIGN);
4036 buffer_info->skb = skb;
4037 buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = pci_map_single(pdev,
						  skb->data,
						  adapter->rx_buffer_len,
4042 PCI_DMA_FROMDEVICE);
4044 /* Fix for errata 23, can't cross 64kB boundary */
4045 if (!e1000_check_64k_bound(adapter,
4046 (void *)(unsigned long)buffer_info->dma,
4047 adapter->rx_buffer_len)) {
4048 DPRINTK(RX_ERR, ERR,
4049 "dma align check failed: %u bytes at %p\n",
4050 adapter->rx_buffer_len,
4051 (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
4055 pci_unmap_single(pdev, buffer_info->dma,
4056 adapter->rx_buffer_len,
4057 PCI_DMA_FROMDEVICE);
			break; /* while !buffer_info->skb */
		}
4061 rx_desc = E1000_RX_DESC(*rx_ring, i);
4062 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4064 if (unlikely(++i == rx_ring->count))
4066 buffer_info = &rx_ring->buffer_info[i];
4069 if (likely(rx_ring->next_to_use != i)) {
4070 rx_ring->next_to_use = i;
4071 if (unlikely(i-- == 0))
4072 i = (rx_ring->count - 1);
4074 /* Force memory writes to complete before letting h/w
4075 * know there are new descriptors to fetch. (Only
4076 * applicable for weak-ordered memory model archs,
4077 * such as IA-64). */
4079 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4084 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
4085 * @adapter: address of board private structure
4089 e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4090 struct e1000_rx_ring *rx_ring,
4093 struct net_device *netdev = adapter->netdev;
4094 struct pci_dev *pdev = adapter->pdev;
4095 union e1000_rx_desc_packet_split *rx_desc;
4096 struct e1000_buffer *buffer_info;
4097 struct e1000_ps_page *ps_page;
4098 struct e1000_ps_page_dma *ps_page_dma;
4099 struct sk_buff *skb;
4102 i = rx_ring->next_to_use;
4103 buffer_info = &rx_ring->buffer_info[i];
4104 ps_page = &rx_ring->ps_page[i];
4105 ps_page_dma = &rx_ring->ps_page_dma[i];
	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			if (j < adapter->rx_ps_pages) {
				if (likely(!ps_page->ps_page[j])) {
					ps_page->ps_page[j] =
						alloc_page(GFP_ATOMIC);
					if (unlikely(!ps_page->ps_page[j])) {
						adapter->alloc_rx_buff_failed++;
						goto no_buffers;
					}
					ps_page_dma->ps_page_dma[j] =
						pci_map_page(pdev,
							     ps_page->ps_page[j],
							     0, PAGE_SIZE,
							     PCI_DMA_FROMDEVICE);
				}
				/* Refresh the desc even if buffer_addrs didn't
				 * change because each write-back erases
				 * this info. */
				rx_desc->read.buffer_addr[j+1] =
					cpu_to_le64(ps_page_dma->ps_page_dma[j]);
			} else
				rx_desc->read.buffer_addr[j+1] = ~0;
		}
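		/* Packet-split layout: buffer_addr[0] (filled in below)
		 * receives the header buffer, while the loop above points
		 * the remaining slots at whole pages for payload, poisoning
		 * unused slots with ~0 since the hardware is given no valid
		 * address to write through there. */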
		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			adapter->alloc_rx_buff_failed++;
			break;
		}
		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		skb->dev = netdev;

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_ps_bsize0;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
		if (unlikely(++i == rx_ring->count)) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
	}

no_buffers:
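	/* Jumped to when a page allocation fails mid-ring: descriptors
	 * completed so far are still handed to the hardware below, and a
	 * later replenish pass retries the remainder. */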
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		/* Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much */
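		/* For example, with i == 37 the value written is 74:
		 * descriptor 37 begins at byte offset 37 * 32 = 1184, which
		 * is 74 units of the 16-byte granularity RDT counts in. */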
		writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: address of board private structure
 **/

static void
e1000_smartspeed(struct e1000_adapter *adapter)
{
	uint16_t phy_status;
	uint16_t phy_ctrl;
	if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
	    !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
			    !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
		    !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
static int
e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
static int
e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	uint16_t mii_reg;
	uint16_t spddplx;
	unsigned long flags;
	if (adapter->hw.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy_addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		if (adapter->hw.media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					adapter->hw.autoneg = 1;
					adapter->hw.autoneg_advertised = 0x2F;
				} else {
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval) {
						spin_unlock_irqrestore(
							&adapter->stats_lock,
							flags);
						return retval;
					}
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(&adapter->hw)) {
					spin_unlock_irqrestore(
						&adapter->stats_lock, flags);
					return -EIO;
				}
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
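/* For reference, a minimal user-space sketch of how these handlers are
 * reached (illustrative only; "eth0" is a placeholder interface name):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		struct mii_ioctl_data *mii =
 *			(struct mii_ioctl_data *)&ifr.ifr_data;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ioctl(fd, SIOCGMIIPHY, &ifr);	// fills in mii->phy_id
 *		mii->reg_num = MII_BMSR;	// PHY status register
 *		ioctl(fd, SIOCGMIIREG, &ifr);	// lands in e1000_mii_ioctl()
 *		printf("BMSR = 0x%04x\n", mii->val_out);
 *		return 0;
 *	}
 */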
void
e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
}

void
e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

void
e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void
e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}
uint32_t
e1000_io_read(struct e1000_hw *hw, unsigned long port)
{
	return inl(port);
}

void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
	outl(value, port);
}
static void
e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, rctl;

	e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			/* enable VLAN receive filtering */
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_VFE;
			rctl &= ~E1000_RCTL_CFIEN;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
			e1000_update_mng_vlan(adapter);
		}
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl &= ~E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			/* disable VLAN filtering */
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl &= ~E1000_RCTL_VFE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
			if (adapter->mng_vlan_id !=
			    (uint16_t)E1000_MNG_VLAN_NONE) {
				e1000_vlan_rx_kill_vid(netdev,
						       adapter->mng_vlan_id);
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}
		}
	}

	e1000_irq_enable(adapter);
}
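/* The e1000_ich8lan special cases above reflect that the ICH8-family parts
 * do not implement the VLAN filter table, so only tag insert/strip
 * (CTRL.VME) is toggled for them while VFE/VFTA manipulation is skipped. */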
static void
e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
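	/* The VFTA is 128 32-bit words forming a 4096-bit bitmap, one bit
	 * per VLAN ID: VID bits [11:5] select the word, bits [4:0] the bit
	 * within it.  Example: vid 1234 (0x4D2) lands in word 38, bit 18. */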
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}
static void
e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	e1000_irq_disable(adapter);

	if (adapter->vlgrp)
		adapter->vlgrp->vlan_devices[vid] = NULL;

	e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}
static void
e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		uint16_t vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!adapter->vlgrp->vlan_devices[vid])
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
int
e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
{
	adapter->hw.autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
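	/* spddplx is the sum of an ethtool speed and duplex constant; the
	 * encoding is unambiguous because SPEED_10/100/1000 have the literal
	 * values 10/100/1000 while DUPLEX_HALF/DUPLEX_FULL are 0/1, so e.g.
	 * "speed 100 duplex full" arrives as 100 + 1 == 101 and matches the
	 * SPEED_100 + DUPLEX_FULL case below. */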
	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		adapter->hw.autoneg = 1;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
#ifdef CONFIG_PM
/* Save/restore 16 or 64 dwords of PCI config space depending on which
 * bus we're on (PCI(X) vs. PCI-E)
 */
#define PCIE_CONFIG_SPACE_LEN 256
#define PCI_CONFIG_SPACE_LEN 64
static int
e1000_pci_save_state(struct e1000_adapter *adapter)
{
	struct pci_dev *dev = adapter->pdev;
	int size;
	int i;

	if (adapter->hw.mac_type >= e1000_82571)
		size = PCIE_CONFIG_SPACE_LEN;
	else
		size = PCI_CONFIG_SPACE_LEN;

	WARN_ON(adapter->config_space != NULL);

	adapter->config_space = kmalloc(size, GFP_KERNEL);
	if (!adapter->config_space) {
		DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
		return -ENOMEM;
	}
	for (i = 0; i < (size / 4); i++)
		pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
	return 0;
}
static void
e1000_pci_restore_state(struct e1000_adapter *adapter)
{
	struct pci_dev *dev = adapter->pdev;
	int size;
	int i;

	if (adapter->config_space == NULL)
		return;

	if (adapter->hw.mac_type >= e1000_82571)
		size = PCIE_CONFIG_SPACE_LEN;
	else
		size = PCI_CONFIG_SPACE_LEN;
	for (i = 0; i < (size / 4); i++)
		pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
	kfree(adapter->config_space);
	adapter->config_space = NULL;
}
#endif /* CONFIG_PM */
static int
e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, ctrl_ext, rctl, manc, status;
	uint32_t wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}
#ifdef CONFIG_PM
	/* Implement our own version of pci_save_state(pdev) because pci-
	 * express adapters have 256-byte config spaces. */
	retval = e1000_pci_save_state(adapter);
	if (retval)
		return retval;
#endif
	status = E1000_READ_REG(&adapter->hw, STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;
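	/* With the link currently up, the "link status change" wake bit is
	 * dropped, presumably so the link transition caused by suspending
	 * does not immediately wake the system back up. */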
	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (adapter->wol & E1000_WUFC_MC) {
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_MPE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		}
		if (adapter->hw.mac_type >= e1000_82540) {
			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
		}
		if (adapter->hw.media_type == e1000_media_type_fiber ||
		    adapter->hw.media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
		}
		/* Allow time for pending master requests to run */
		e1000_disable_pciex_master(&adapter->hw);

		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
		E1000_WRITE_REG(&adapter->hw, WUFC, 0);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}
	/* FIXME: this code is incorrect for PCI Express */
	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.mac_type != e1000_ich8lan &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if (manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
			pci_enable_wake(pdev, PCI_D3hot, 1);
			pci_enable_wake(pdev, PCI_D3cold, 1);
		}
	}
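	/* Note on the block above: when the management controller shares
	 * the SMBus (E1000_MANC_SMBUS_EN), ARP offload is enabled so the
	 * adapter stays reachable for manageability traffic while the host
	 * sleeps, and wake is enabled accordingly. */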
	if (adapter->hw.phy_type == e1000_phy_igp_3)
		e1000_phy_powerdown_workaround(&adapter->hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
#ifdef CONFIG_PM
static int
e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t manc, ret_val;

	pci_set_power_state(pdev, PCI_D0);
	e1000_pci_restore_state(adapter);
	ret_val = pci_enable_device(pdev);
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);
	/* FIXME: this code is incorrect for PCI Express */
	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.mac_type != e1000_ich8lan &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up.  For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return 0;
}
#endif
static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void
e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev, NULL);
	e1000_clean_tx_irq(adapter, adapter->tx_ring);
#ifndef CONFIG_E1000_NAPI
	adapter->clean_rx(adapter, adapter->rx_ring);
#endif
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e1000_down(adapter);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e1000: Cannot re-enable PCI device after "
		       "reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);
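	/* On dual-port adapters both PCI functions sit on the same silicon,
	 * so restricting the reset below to function 0 keeps error recovery
	 * from resetting the part once per function. */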
	/* Perform card reset only on one instance of the card */
	if (PCI_FUNC(pdev->devfn) != 0)
		return PCI_ERS_RESULT_RECOVERED;

	e1000_reset(adapter);
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t manc, swsm;

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			printk(KERN_ERR "e1000: can't bring device back up "
			       "after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}

	switch (adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm | E1000_SWSM_DRV_LOAD);
		break;
	default:
		break;
	}

	if (netif_running(netdev))
		mod_timer(&adapter->watchdog_timer, jiffies);
}