1 /*******************************************************************************
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
29 #include <linux/types.h>
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/vmalloc.h>
34 #include <linux/string.h>
37 #include <linux/tcp.h>
38 #include <linux/ipv6.h>
39 #include <net/checksum.h>
40 #include <net/ip6_checksum.h>
41 #include <linux/ethtool.h>
42 #include <linux/if_vlan.h>
45 #include "ixgbe_common.h"
47 char ixgbe_driver_name[] = "ixgbe";
48 static const char ixgbe_driver_string[] =
49 "Intel(R) 10 Gigabit PCI Express Network Driver";
51 #define DRV_VERSION "1.3.18-k2"
52 const char ixgbe_driver_version[] = DRV_VERSION;
53 static const char ixgbe_copyright[] =
54 "Copyright (c) 1999-2007 Intel Corporation.";
56 static const struct ixgbe_info *ixgbe_info_tbl[] = {
57 [board_82598] = &ixgbe_82598_info,
60 /* ixgbe_pci_tbl - PCI Device ID Table
62 * Wildcard entries (PCI_ANY_ID) should come last
63 * Last entry must be all 0s
65 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66 * Class, Class Mask, private data (not used) }
68 static struct pci_device_id ixgbe_pci_tbl[] = {
69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
76 /* required last entry */
79 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
82 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
84 static struct notifier_block dca_notifier = {
85 .notifier_call = ixgbe_notify_dca,
91 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
92 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
93 MODULE_LICENSE("GPL");
94 MODULE_VERSION(DRV_VERSION);
96 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
98 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
102 /* Let firmware take over control of h/w */
103 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
104 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
105 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
108 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
112 /* Let firmware know the driver has taken over */
113 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
114 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
115 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
120 * ixgbe_get_hw_dev_name - return device name string
121 * used by hardware layer to print debugging information
123 char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
125 struct ixgbe_adapter *adapter = hw->back;
126 struct net_device *netdev = adapter->netdev;
131 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
136 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
137 index = (int_alloc_entry >> 2) & 0x1F;
138 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
139 ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
140 ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
141 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
144 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
145 struct ixgbe_tx_buffer
148 if (tx_buffer_info->dma) {
149 pci_unmap_page(adapter->pdev,
151 tx_buffer_info->length, PCI_DMA_TODEVICE);
152 tx_buffer_info->dma = 0;
154 if (tx_buffer_info->skb) {
155 dev_kfree_skb_any(tx_buffer_info->skb);
156 tx_buffer_info->skb = NULL;
158 /* tx_buffer_info must be completely set up in the transmit path */
161 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
162 struct ixgbe_ring *tx_ring,
164 union ixgbe_adv_tx_desc *eop_desc)
166 /* Detect a transmit hang in hardware, this serializes the
167 * check with the clearing of time_stamp and movement of i */
168 adapter->detect_tx_hung = false;
169 if (tx_ring->tx_buffer_info[eop].dma &&
170 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
171 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
172 /* detected Tx unit hang */
173 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
176 " next_to_use <%x>\n"
177 " next_to_clean <%x>\n"
178 "tx_buffer_info[next_to_clean]\n"
179 " time_stamp <%lx>\n"
180 " next_to_watch <%x>\n"
182 " next_to_watch.status <%x>\n",
183 readl(adapter->hw.hw_addr + tx_ring->head),
184 readl(adapter->hw.hw_addr + tx_ring->tail),
185 tx_ring->next_to_use,
186 tx_ring->next_to_clean,
187 tx_ring->tx_buffer_info[eop].time_stamp,
188 eop, jiffies, eop_desc->wb.status);
195 #define IXGBE_MAX_TXD_PWR 14
196 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
198 /* Tx Descriptors needed, worst case */
199 #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
200 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
201 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
202 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
205 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
206 * @adapter: board private structure
208 static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
209 struct ixgbe_ring *tx_ring)
211 struct net_device *netdev = adapter->netdev;
212 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
213 struct ixgbe_tx_buffer *tx_buffer_info;
215 bool cleaned = false;
216 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
218 i = tx_ring->next_to_clean;
219 eop = tx_ring->tx_buffer_info[i].next_to_watch;
220 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
221 while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
224 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
225 tx_buffer_info = &tx_ring->tx_buffer_info[i];
226 cleaned = (i == eop);
228 tx_ring->stats.bytes += tx_buffer_info->length;
230 struct sk_buff *skb = tx_buffer_info->skb;
231 unsigned int segs, bytecount;
232 segs = skb_shinfo(skb)->gso_segs ?: 1;
233 /* multiply data chunks by size of headers */
234 bytecount = ((segs - 1) * skb_headlen(skb)) +
236 total_tx_packets += segs;
237 total_tx_bytes += bytecount;
239 ixgbe_unmap_and_free_tx_resource(adapter,
241 tx_desc->wb.status = 0;
244 if (i == tx_ring->count)
248 tx_ring->stats.packets++;
250 eop = tx_ring->tx_buffer_info[i].next_to_watch;
251 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
253 /* weight of a sort for tx, avoid endless transmit cleanup */
254 if (total_tx_packets >= tx_ring->work_limit)
258 tx_ring->next_to_clean = i;
260 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
261 if (total_tx_packets && netif_carrier_ok(netdev) &&
262 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
263 /* Make sure that anybody stopping the queue after this
264 * sees the new next_to_clean.
267 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
268 !test_bit(__IXGBE_DOWN, &adapter->state)) {
269 netif_wake_subqueue(netdev, tx_ring->queue_index);
270 adapter->restart_queue++;
274 if (adapter->detect_tx_hung)
275 if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
276 netif_stop_subqueue(netdev, tx_ring->queue_index);
278 if (total_tx_packets >= tx_ring->work_limit)
279 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
281 tx_ring->total_bytes += total_tx_bytes;
282 tx_ring->total_packets += total_tx_packets;
283 adapter->net_stats.tx_bytes += total_tx_bytes;
284 adapter->net_stats.tx_packets += total_tx_packets;
285 cleaned = total_tx_packets ? true : false;
290 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
291 struct ixgbe_ring *rxr)
295 int q = rxr - adapter->rx_ring;
297 if (rxr->cpu != cpu) {
298 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
299 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
300 rxctrl |= dca_get_tag(cpu);
301 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
302 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
303 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
309 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
310 struct ixgbe_ring *txr)
314 int q = txr - adapter->tx_ring;
316 if (txr->cpu != cpu) {
317 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
318 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
319 txctrl |= dca_get_tag(cpu);
320 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
321 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
327 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
331 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
334 for (i = 0; i < adapter->num_tx_queues; i++) {
335 adapter->tx_ring[i].cpu = -1;
336 ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
338 for (i = 0; i < adapter->num_rx_queues; i++) {
339 adapter->rx_ring[i].cpu = -1;
340 ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
344 static int __ixgbe_notify_dca(struct device *dev, void *data)
346 struct net_device *netdev = dev_get_drvdata(dev);
347 struct ixgbe_adapter *adapter = netdev_priv(netdev);
348 unsigned long event = *(unsigned long *)data;
351 case DCA_PROVIDER_ADD:
352 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
353 /* Always use CB2 mode, difference is masked
354 * in the CB driver. */
355 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
356 if (dca_add_requester(dev) == 0) {
357 ixgbe_setup_dca(adapter);
360 /* Fall Through since DCA is disabled. */
361 case DCA_PROVIDER_REMOVE:
362 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
363 dca_remove_requester(dev);
364 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
365 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
373 #endif /* CONFIG_DCA */
375 * ixgbe_receive_skb - Send a completed packet up the stack
376 * @adapter: board private structure
377 * @skb: packet to send up
378 * @status: hardware indication of status of receive
379 * @rx_ring: rx descriptor ring (for a specific queue) to setup
380 * @rx_desc: rx descriptor
382 static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
383 struct sk_buff *skb, u8 status,
384 struct ixgbe_ring *ring,
385 union ixgbe_adv_rx_desc *rx_desc)
387 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
388 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
390 if (adapter->netdev->features & NETIF_F_LRO &&
391 skb->ip_summed == CHECKSUM_UNNECESSARY) {
392 if (adapter->vlgrp && is_vlan)
393 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
397 lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
398 ring->lro_used = true;
400 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
401 if (adapter->vlgrp && is_vlan)
402 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
404 netif_receive_skb(skb);
406 if (adapter->vlgrp && is_vlan)
407 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
415 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
416 * @adapter: address of board private structure
417 * @status_err: hardware indication of status of receive
418 * @skb: skb currently being received and modified
420 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
424 skb->ip_summed = CHECKSUM_NONE;
426 /* Ignore Checksum bit is set, or rx csum disabled */
427 if ((status_err & IXGBE_RXD_STAT_IXSM) ||
428 !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
431 /* if IP and error */
432 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
433 (status_err & IXGBE_RXDADV_ERR_IPE)) {
434 adapter->hw_csum_rx_error++;
438 if (!(status_err & IXGBE_RXD_STAT_L4CS))
441 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
442 adapter->hw_csum_rx_error++;
446 /* It must be a TCP or UDP packet with a valid checksum */
447 skb->ip_summed = CHECKSUM_UNNECESSARY;
448 adapter->hw_csum_rx_good++;
452 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
453 * @adapter: address of board private structure
455 static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
456 struct ixgbe_ring *rx_ring,
459 struct net_device *netdev = adapter->netdev;
460 struct pci_dev *pdev = adapter->pdev;
461 union ixgbe_adv_rx_desc *rx_desc;
462 struct ixgbe_rx_buffer *rx_buffer_info;
465 unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;
467 i = rx_ring->next_to_use;
468 rx_buffer_info = &rx_ring->rx_buffer_info[i];
470 while (cleaned_count--) {
471 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
473 if (!rx_buffer_info->page &&
474 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
475 rx_buffer_info->page = alloc_page(GFP_ATOMIC);
476 if (!rx_buffer_info->page) {
477 adapter->alloc_rx_page_failed++;
480 rx_buffer_info->page_dma =
481 pci_map_page(pdev, rx_buffer_info->page,
482 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
485 if (!rx_buffer_info->skb) {
486 skb = netdev_alloc_skb(netdev, bufsz);
489 adapter->alloc_rx_buff_failed++;
494 * Make buffer alignment 2 beyond a 16 byte boundary
495 * this will result in a 16 byte aligned IP header after
496 * the 14 byte MAC header is removed
498 skb_reserve(skb, NET_IP_ALIGN);
500 rx_buffer_info->skb = skb;
501 rx_buffer_info->dma = pci_map_single(pdev, skb->data,
505 /* Refresh the desc even if buffer_addrs didn't change because
506 * each write-back erases this info. */
507 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
508 rx_desc->read.pkt_addr =
509 cpu_to_le64(rx_buffer_info->page_dma);
510 rx_desc->read.hdr_addr =
511 cpu_to_le64(rx_buffer_info->dma);
513 rx_desc->read.pkt_addr =
514 cpu_to_le64(rx_buffer_info->dma);
518 if (i == rx_ring->count)
520 rx_buffer_info = &rx_ring->rx_buffer_info[i];
523 if (rx_ring->next_to_use != i) {
524 rx_ring->next_to_use = i;
526 i = (rx_ring->count - 1);
529 * Force memory writes to complete before letting h/w
530 * know there are new descriptors to fetch. (Only
531 * applicable for weak-ordered memory model archs,
535 writel(i, adapter->hw.hw_addr + rx_ring->tail);
539 static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
540 struct ixgbe_ring *rx_ring,
541 int *work_done, int work_to_do)
543 struct net_device *netdev = adapter->netdev;
544 struct pci_dev *pdev = adapter->pdev;
545 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
546 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
549 u32 upper_len, len, staterr;
551 bool cleaned = false;
552 int cleaned_count = 0;
553 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
555 i = rx_ring->next_to_clean;
557 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
558 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
559 rx_buffer_info = &rx_ring->rx_buffer_info[i];
561 while (staterr & IXGBE_RXD_STAT_DD) {
562 if (*work_done >= work_to_do)
566 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
568 le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
570 ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
571 IXGBE_RXDADV_HDRBUFLEN_SHIFT);
572 if (hdr_info & IXGBE_RXDADV_SPH)
573 adapter->rx_hdr_split++;
574 if (len > IXGBE_RX_HDR_SIZE)
575 len = IXGBE_RX_HDR_SIZE;
576 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
578 len = le16_to_cpu(rx_desc->wb.upper.length);
581 skb = rx_buffer_info->skb;
582 prefetch(skb->data - NET_IP_ALIGN);
583 rx_buffer_info->skb = NULL;
585 if (len && !skb_shinfo(skb)->nr_frags) {
586 pci_unmap_single(pdev, rx_buffer_info->dma,
587 adapter->rx_buf_len + NET_IP_ALIGN,
593 pci_unmap_page(pdev, rx_buffer_info->page_dma,
594 PAGE_SIZE, PCI_DMA_FROMDEVICE);
595 rx_buffer_info->page_dma = 0;
596 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
597 rx_buffer_info->page, 0, upper_len);
598 rx_buffer_info->page = NULL;
600 skb->len += upper_len;
601 skb->data_len += upper_len;
602 skb->truesize += upper_len;
606 if (i == rx_ring->count)
608 next_buffer = &rx_ring->rx_buffer_info[i];
610 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
614 if (staterr & IXGBE_RXD_STAT_EOP) {
615 rx_ring->stats.packets++;
616 rx_ring->stats.bytes += skb->len;
618 rx_buffer_info->skb = next_buffer->skb;
619 rx_buffer_info->dma = next_buffer->dma;
620 next_buffer->skb = skb;
621 adapter->non_eop_descs++;
625 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
626 dev_kfree_skb_irq(skb);
630 ixgbe_rx_checksum(adapter, staterr, skb);
632 /* probably a little skewed due to removing CRC */
633 total_rx_bytes += skb->len;
636 skb->protocol = eth_type_trans(skb, netdev);
637 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
638 netdev->last_rx = jiffies;
641 rx_desc->wb.upper.status_error = 0;
643 /* return some buffers to hardware, one at a time is too slow */
644 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
645 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
649 /* use prefetched values */
651 rx_buffer_info = next_buffer;
653 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
656 if (rx_ring->lro_used) {
657 lro_flush_all(&rx_ring->lro_mgr);
658 rx_ring->lro_used = false;
661 rx_ring->next_to_clean = i;
662 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
665 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
667 adapter->net_stats.rx_bytes += total_rx_bytes;
668 adapter->net_stats.rx_packets += total_rx_packets;
670 rx_ring->total_packets += total_rx_packets;
671 rx_ring->total_bytes += total_rx_bytes;
672 adapter->net_stats.rx_bytes += total_rx_bytes;
673 adapter->net_stats.rx_packets += total_rx_packets;
678 static int ixgbe_clean_rxonly(struct napi_struct *, int);
680 * ixgbe_configure_msix - Configure MSI-X hardware
681 * @adapter: board private structure
683 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
686 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
688 struct ixgbe_q_vector *q_vector;
689 int i, j, q_vectors, v_idx, r_idx;
692 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
694 /* Populate the IVAR table and set the ITR values to the
695 * corresponding register.
697 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
698 q_vector = &adapter->q_vector[v_idx];
699 /* XXX for_each_bit(...) */
700 r_idx = find_first_bit(q_vector->rxr_idx,
701 adapter->num_rx_queues);
703 for (i = 0; i < q_vector->rxr_count; i++) {
704 j = adapter->rx_ring[r_idx].reg_idx;
705 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
706 r_idx = find_next_bit(q_vector->rxr_idx,
707 adapter->num_rx_queues,
710 r_idx = find_first_bit(q_vector->txr_idx,
711 adapter->num_tx_queues);
713 for (i = 0; i < q_vector->txr_count; i++) {
714 j = adapter->tx_ring[r_idx].reg_idx;
715 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
716 r_idx = find_next_bit(q_vector->txr_idx,
717 adapter->num_tx_queues,
721 /* if this is a tx only vector use half the irq (tx) rate */
722 if (q_vector->txr_count && !q_vector->rxr_count)
723 q_vector->eitr = adapter->tx_eitr;
725 /* rx only or mixed */
726 q_vector->eitr = adapter->rx_eitr;
728 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
729 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
732 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
733 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
735 /* set up to autoclear timer, lsc, and the vectors */
736 mask = IXGBE_EIMS_ENABLE_MASK;
737 mask &= ~IXGBE_EIMS_OTHER;
738 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
745 latency_invalid = 255
749 * ixgbe_update_itr - update the dynamic ITR value based on statistics
750 * @adapter: pointer to adapter
751 * @eitr: eitr setting (ints per sec) to give last timeslice
752 * @itr_setting: current throttle rate in ints/second
753 * @packets: the number of packets during this measurement interval
754 * @bytes: the number of bytes during this measurement interval
756 * Stores a new ITR value based on packets and byte
757 * counts during the last interrupt. The advantage of per interrupt
758 * computation is faster updates and more accurate ITR for the current
759 * traffic pattern. Constants in this function were computed
760 * based on theoretical maximum wire speed and thresholds were set based
761 * on testing data as well as attempting to minimize response time
762 * while increasing bulk throughput.
763 * this functionality is controlled by the InterruptThrottleRate module
764 * parameter (see ixgbe_param.c)
766 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
767 u32 eitr, u8 itr_setting,
768 int packets, int bytes)
770 unsigned int retval = itr_setting;
775 goto update_itr_done;
778 /* simple throttlerate management
779 * 0-20MB/s lowest (100000 ints/s)
780 * 20-100MB/s low (20000 ints/s)
781 * 100-1249MB/s bulk (8000 ints/s)
783 /* what was last interrupt timeslice? */
784 timepassed_us = 1000000/eitr;
785 bytes_perint = bytes / timepassed_us; /* bytes/usec */
787 switch (itr_setting) {
789 if (bytes_perint > adapter->eitr_low)
790 retval = low_latency;
793 if (bytes_perint > adapter->eitr_high)
794 retval = bulk_latency;
795 else if (bytes_perint <= adapter->eitr_low)
796 retval = lowest_latency;
799 if (bytes_perint <= adapter->eitr_high)
800 retval = low_latency;
808 static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
810 struct ixgbe_adapter *adapter = q_vector->adapter;
811 struct ixgbe_hw *hw = &adapter->hw;
813 u8 current_itr, ret_itr;
814 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
815 sizeof(struct ixgbe_q_vector);
816 struct ixgbe_ring *rx_ring, *tx_ring;
818 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
819 for (i = 0; i < q_vector->txr_count; i++) {
820 tx_ring = &(adapter->tx_ring[r_idx]);
821 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
823 tx_ring->total_packets,
824 tx_ring->total_bytes);
825 /* if the result for this queue would decrease interrupt
826 * rate for this vector then use that result */
827 q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
828 q_vector->tx_eitr - 1 : ret_itr);
829 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
833 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
834 for (i = 0; i < q_vector->rxr_count; i++) {
835 rx_ring = &(adapter->rx_ring[r_idx]);
836 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
838 rx_ring->total_packets,
839 rx_ring->total_bytes);
840 /* if the result for this queue would decrease interrupt
841 * rate for this vector then use that result */
842 q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
843 q_vector->rx_eitr - 1 : ret_itr);
844 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
848 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
850 switch (current_itr) {
851 /* counts and packets in update_itr are dependent on these numbers */
856 new_itr = 20000; /* aka hwitr = ~200 */
864 if (new_itr != q_vector->eitr) {
866 /* do an exponential smoothing */
867 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
868 q_vector->eitr = new_itr;
869 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
870 /* must write high and low 16 bits to reset counter */
871 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
873 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
879 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
881 struct net_device *netdev = data;
882 struct ixgbe_adapter *adapter = netdev_priv(netdev);
883 struct ixgbe_hw *hw = &adapter->hw;
884 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
886 if (eicr & IXGBE_EICR_LSC) {
888 if (!test_bit(__IXGBE_DOWN, &adapter->state))
889 mod_timer(&adapter->watchdog_timer, jiffies);
892 if (!test_bit(__IXGBE_DOWN, &adapter->state))
893 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
898 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
900 struct ixgbe_q_vector *q_vector = data;
901 struct ixgbe_adapter *adapter = q_vector->adapter;
902 struct ixgbe_ring *txr;
905 if (!q_vector->txr_count)
908 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
909 for (i = 0; i < q_vector->txr_count; i++) {
910 txr = &(adapter->tx_ring[r_idx]);
912 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
913 ixgbe_update_tx_dca(adapter, txr);
915 txr->total_bytes = 0;
916 txr->total_packets = 0;
917 ixgbe_clean_tx_irq(adapter, txr);
918 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
926 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
928 * @data: pointer to our q_vector struct for this interrupt vector
930 static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
932 struct ixgbe_q_vector *q_vector = data;
933 struct ixgbe_adapter *adapter = q_vector->adapter;
934 struct ixgbe_ring *rxr;
937 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
938 if (!q_vector->rxr_count)
941 rxr = &(adapter->rx_ring[r_idx]);
942 /* disable interrupts on this vector only */
943 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
944 rxr->total_bytes = 0;
945 rxr->total_packets = 0;
946 netif_rx_schedule(adapter->netdev, &q_vector->napi);
951 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
953 ixgbe_msix_clean_rx(irq, data);
954 ixgbe_msix_clean_tx(irq, data);
960 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
961 * @napi: napi struct with our devices info in it
962 * @budget: amount of work driver is allowed to do this pass, in packets
965 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
967 struct ixgbe_q_vector *q_vector =
968 container_of(napi, struct ixgbe_q_vector, napi);
969 struct ixgbe_adapter *adapter = q_vector->adapter;
970 struct ixgbe_ring *rxr;
974 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
975 rxr = &(adapter->rx_ring[r_idx]);
977 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
978 ixgbe_update_rx_dca(adapter, rxr);
981 ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
983 /* If all Rx work done, exit the polling mode */
984 if (work_done < budget) {
985 netif_rx_complete(adapter->netdev, napi);
986 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
987 ixgbe_set_itr_msix(q_vector);
988 if (!test_bit(__IXGBE_DOWN, &adapter->state))
989 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
995 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
998 a->q_vector[v_idx].adapter = a;
999 set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
1000 a->q_vector[v_idx].rxr_count++;
1001 a->rx_ring[r_idx].v_idx = 1 << v_idx;
1004 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1007 a->q_vector[v_idx].adapter = a;
1008 set_bit(r_idx, a->q_vector[v_idx].txr_idx);
1009 a->q_vector[v_idx].txr_count++;
1010 a->tx_ring[r_idx].v_idx = 1 << v_idx;
1014 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
1015 * @adapter: board private structure to initialize
1016 * @vectors: allotted vector count for descriptor rings
1018 * This function maps descriptor rings to the queue-specific vectors
1019 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1020 * one vector per ring/queue, but on a constrained vector budget, we
1021 * group the rings as "efficiently" as possible. You would add new
1022 * mapping configurations in here.
1024 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
1028 int rxr_idx = 0, txr_idx = 0;
1029 int rxr_remaining = adapter->num_rx_queues;
1030 int txr_remaining = adapter->num_tx_queues;
1035 /* No mapping required if MSI-X is disabled. */
1036 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1040 * The ideal configuration...
1041 * We have enough vectors to map one per queue.
1043 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1044 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1045 map_vector_to_rxq(adapter, v_start, rxr_idx);
1047 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1048 map_vector_to_txq(adapter, v_start, txr_idx);
1054 * If we don't have enough vectors for a 1-to-1
1055 * mapping, we'll have to group them so there are
1056 * multiple queues per vector.
1058 /* Re-adjusting *qpv takes care of the remainder. */
1059 for (i = v_start; i < vectors; i++) {
1060 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
1061 for (j = 0; j < rqpv; j++) {
1062 map_vector_to_rxq(adapter, i, rxr_idx);
1067 for (i = v_start; i < vectors; i++) {
1068 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
1069 for (j = 0; j < tqpv; j++) {
1070 map_vector_to_txq(adapter, i, txr_idx);
1081 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
1082 * @adapter: board private structure
1084 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
1085 * interrupts from the kernel.
1087 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1089 struct net_device *netdev = adapter->netdev;
1090 irqreturn_t (*handler)(int, void *);
1091 int i, vector, q_vectors, err;
1093 /* Decrement for Other and TCP Timer vectors */
1094 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1096 /* Map the Tx/Rx rings to the vectors we were allotted. */
1097 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
1101 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
1102 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
1103 &ixgbe_msix_clean_many)
1104 for (vector = 0; vector < q_vectors; vector++) {
1105 handler = SET_HANDLER(&adapter->q_vector[vector]);
1106 sprintf(adapter->name[vector], "%s:v%d-%s",
1107 netdev->name, vector,
1108 (handler == &ixgbe_msix_clean_rx) ? "Rx" :
1109 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
1110 err = request_irq(adapter->msix_entries[vector].vector,
1111 handler, 0, adapter->name[vector],
1112 &(adapter->q_vector[vector]));
1115 "request_irq failed for MSIX interrupt "
1116 "Error: %d\n", err);
1117 goto free_queue_irqs;
1121 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
1122 err = request_irq(adapter->msix_entries[vector].vector,
1123 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
1126 "request_irq for msix_lsc failed: %d\n", err);
1127 goto free_queue_irqs;
1133 for (i = vector - 1; i >= 0; i--)
1134 free_irq(adapter->msix_entries[--vector].vector,
1135 &(adapter->q_vector[i]));
1136 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1137 pci_disable_msix(adapter->pdev);
1138 kfree(adapter->msix_entries);
1139 adapter->msix_entries = NULL;
1144 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1146 struct ixgbe_hw *hw = &adapter->hw;
1147 struct ixgbe_q_vector *q_vector = adapter->q_vector;
1149 u32 new_itr = q_vector->eitr;
1150 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
1151 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
1153 q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
1155 tx_ring->total_packets,
1156 tx_ring->total_bytes);
1157 q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
1159 rx_ring->total_packets,
1160 rx_ring->total_bytes);
1162 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
1164 switch (current_itr) {
1165 /* counts and packets in update_itr are dependent on these numbers */
1166 case lowest_latency:
1170 new_itr = 20000; /* aka hwitr = ~200 */
1179 if (new_itr != q_vector->eitr) {
1181 /* do an exponential smoothing */
1182 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1183 q_vector->eitr = new_itr;
1184 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
1185 /* must write high and low 16 bits to reset counter */
1186 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
1192 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);
1195 * ixgbe_intr - legacy mode Interrupt Handler
1196 * @irq: interrupt number
1197 * @data: pointer to a network interface device structure
1198 * @pt_regs: CPU registers structure
1200 static irqreturn_t ixgbe_intr(int irq, void *data)
1202 struct net_device *netdev = data;
1203 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1204 struct ixgbe_hw *hw = &adapter->hw;
1208 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
1209 * therefore no explict interrupt disable is necessary */
1210 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1212 return IRQ_NONE; /* Not our interrupt */
1214 if (eicr & IXGBE_EICR_LSC) {
1216 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1217 mod_timer(&adapter->watchdog_timer, jiffies);
1221 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
1222 adapter->tx_ring[0].total_packets = 0;
1223 adapter->tx_ring[0].total_bytes = 0;
1224 adapter->rx_ring[0].total_packets = 0;
1225 adapter->rx_ring[0].total_bytes = 0;
1226 /* would disable interrupts here but EIAM disabled it */
1227 __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
1233 static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
1235 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1237 for (i = 0; i < q_vectors; i++) {
1238 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
1239 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1240 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1241 q_vector->rxr_count = 0;
1242 q_vector->txr_count = 0;
1247 * ixgbe_request_irq - initialize interrupts
1248 * @adapter: board private structure
1250 * Attempts to configure interrupts using the best available
1251 * capabilities of the hardware and kernel.
1253 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
1255 struct net_device *netdev = adapter->netdev;
1258 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1259 err = ixgbe_request_msix_irqs(adapter);
1260 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1261 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
1262 netdev->name, netdev);
1264 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
1265 netdev->name, netdev);
1269 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
1274 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1276 struct net_device *netdev = adapter->netdev;
1278 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1281 q_vectors = adapter->num_msix_vectors;
1284 free_irq(adapter->msix_entries[i].vector, netdev);
1287 for (; i >= 0; i--) {
1288 free_irq(adapter->msix_entries[i].vector,
1289 &(adapter->q_vector[i]));
1292 ixgbe_reset_q_vectors(adapter);
1294 free_irq(adapter->pdev->irq, netdev);
1299 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1300 * @adapter: board private structure
1302 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1304 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1305 IXGBE_WRITE_FLUSH(&adapter->hw);
1306 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1308 for (i = 0; i < adapter->num_msix_vectors; i++)
1309 synchronize_irq(adapter->msix_entries[i].vector);
1311 synchronize_irq(adapter->pdev->irq);
1316 * ixgbe_irq_enable - Enable default interrupt generation settings
1317 * @adapter: board private structure
1319 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1322 mask = IXGBE_EIMS_ENABLE_MASK;
1323 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1324 IXGBE_WRITE_FLUSH(&adapter->hw);
1328 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1331 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1333 struct ixgbe_hw *hw = &adapter->hw;
1335 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
1336 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
1338 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
1339 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
1341 map_vector_to_rxq(adapter, 0, 0);
1342 map_vector_to_txq(adapter, 0, 0);
1344 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
1348 * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset
1349 * @adapter: board private structure
1351 * Configure the Tx unit of the MAC after a reset.
1353 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1356 struct ixgbe_hw *hw = &adapter->hw;
1357 u32 i, j, tdlen, txctrl;
1359 /* Setup the HW Tx Head and Tail descriptor pointers */
1360 for (i = 0; i < adapter->num_tx_queues; i++) {
1361 j = adapter->tx_ring[i].reg_idx;
1362 tdba = adapter->tx_ring[i].dma;
1363 tdlen = adapter->tx_ring[i].count *
1364 sizeof(union ixgbe_adv_tx_desc);
1365 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
1366 (tdba & DMA_32BIT_MASK));
1367 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
1368 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1369 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1370 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1371 adapter->tx_ring[i].head = IXGBE_TDH(j);
1372 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1373 /* Disable Tx Head Writeback RO bit, since this hoses
1374 * bookkeeping if things aren't delivered in order.
1376 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1377 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1378 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
1382 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1383 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1385 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1387 * ixgbe_get_skb_hdr - helper function for LRO header processing
1388 * @skb: pointer to sk_buff to be added to LRO packet
1389 * @iphdr: pointer to tcp header structure
1390 * @tcph: pointer to tcp header structure
1391 * @hdr_flags: pointer to header flags
1392 * @priv: private data
1394 static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1395 u64 *hdr_flags, void *priv)
1397 union ixgbe_adv_rx_desc *rx_desc = priv;
1399 /* Verify that this is a valid IPv4 TCP packet */
1400 if (!(rx_desc->wb.lower.lo_dword.pkt_info &
1401 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)))
1404 /* Set network headers */
1405 skb_reset_network_header(skb);
1406 skb_set_transport_header(skb, ip_hdrlen(skb));
1407 *iphdr = ip_hdr(skb);
1408 *tcph = tcp_hdr(skb);
1409 *hdr_flags = LRO_IPV4 | LRO_TCP;
1414 * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset
1415 * @adapter: board private structure
1417 * Configure the Rx unit of the MAC after a reset.
1419 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1422 struct ixgbe_hw *hw = &adapter->hw;
1423 struct net_device *netdev = adapter->netdev;
1424 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1426 u32 rdlen, rxctrl, rxcsum;
1430 u32 reta = 0, mrqc, srrctl;
1432 /* Decide whether to use packet split mode or not */
1433 if (netdev->mtu > ETH_DATA_LEN)
1434 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1436 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1438 /* Set the RX buffer length according to the mode */
1439 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1440 adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
1442 if (netdev->mtu <= ETH_DATA_LEN)
1443 adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1445 adapter->rx_buf_len = ALIGN(max_frame, 1024);
1448 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1449 fctrl |= IXGBE_FCTRL_BAM;
1450 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
1451 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1453 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1454 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1455 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
1457 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
1458 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
1460 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1462 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
1463 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1464 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1466 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1467 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1468 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1469 srrctl |= ((IXGBE_RX_HDR_SIZE <<
1470 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1471 IXGBE_SRRCTL_BSIZEHDR_MASK);
1473 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1475 if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1477 IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1480 adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1482 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
1484 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1485 /* disable receives while setting up the descriptors */
1486 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1487 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
1489 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1490 * the Base and Length of the Rx Descriptor Ring */
1491 for (i = 0; i < adapter->num_rx_queues; i++) {
1492 rdba = adapter->rx_ring[i].dma;
1493 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
1494 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
1495 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
1496 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
1497 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
1498 adapter->rx_ring[i].head = IXGBE_RDH(i);
1499 adapter->rx_ring[i].tail = IXGBE_RDT(i);
1502 /* Intitial LRO Settings */
1503 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
1504 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
1505 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
1506 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
1507 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1508 adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
1509 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
1510 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1511 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1513 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1514 /* Fill out redirection table */
1515 for (i = 0, j = 0; i < 128; i++, j++) {
1516 if (j == adapter->ring_feature[RING_F_RSS].indices)
1518 /* reta = 4-byte sliding window of
1519 * 0x00..(indices-1)(indices-1)00..etc. */
1520 reta = (reta << 8) | (j * 0x11);
1522 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
1525 /* Fill out hash function seeds */
1526 /* XXX use a random constant here to glue certain flows */
1527 get_random_bytes(&random[0], 40);
1528 for (i = 0; i < 10; i++)
1529 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
1531 mrqc = IXGBE_MRQC_RSSEN
1532 /* Perform hash on these packet types */
1533 | IXGBE_MRQC_RSS_FIELD_IPV4
1534 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
1535 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
1536 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
1537 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
1538 | IXGBE_MRQC_RSS_FIELD_IPV6
1539 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
1540 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
1541 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
1542 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
1545 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1547 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
1548 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
1549 /* Disable indicating checksum in descriptor, enables
1551 rxcsum |= IXGBE_RXCSUM_PCSD;
1553 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
1554 /* Enable IPv4 payload checksum for UDP fragments
1555 * if PCSD is not set */
1556 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1559 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1562 static void ixgbe_vlan_rx_register(struct net_device *netdev,
1563 struct vlan_group *grp)
1565 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1568 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1569 ixgbe_irq_disable(adapter);
1570 adapter->vlgrp = grp;
1573 /* enable VLAN tag insert/strip */
1574 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1575 ctrl |= IXGBE_VLNCTRL_VME;
1576 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1577 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1580 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1581 ixgbe_irq_enable(adapter);
1584 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1586 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1588 /* add VID to filter table */
1589 ixgbe_set_vfta(&adapter->hw, vid, 0, true);
1592 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1594 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1596 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1597 ixgbe_irq_disable(adapter);
1599 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1601 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1602 ixgbe_irq_enable(adapter);
1604 /* remove VID from filter table */
1605 ixgbe_set_vfta(&adapter->hw, vid, 0, false);
1608 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1610 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1612 if (adapter->vlgrp) {
1614 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1615 if (!vlan_group_get_device(adapter->vlgrp, vid))
1617 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
1623 * ixgbe_set_multi - Multicast and Promiscuous mode set
1624 * @netdev: network interface device structure
1626 * The set_multi entry point is called whenever the multicast address
1627 * list or the network interface flags are updated. This routine is
1628 * responsible for configuring the hardware for proper multicast,
1629 * promiscuous mode, and all-multi behavior.
1631 static void ixgbe_set_multi(struct net_device *netdev)
1633 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1634 struct ixgbe_hw *hw = &adapter->hw;
1635 struct dev_mc_list *mc_ptr;
1640 /* Check for Promiscuous and All Multicast modes */
1642 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1644 if (netdev->flags & IFF_PROMISC) {
1645 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1646 fctrl &= ~IXGBE_VLNCTRL_VFE;
1648 if (netdev->flags & IFF_ALLMULTI) {
1649 fctrl |= IXGBE_FCTRL_MPE;
1650 fctrl &= ~IXGBE_FCTRL_UPE;
1652 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1654 fctrl |= IXGBE_VLNCTRL_VFE;
1657 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1659 if (netdev->mc_count) {
1660 mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
1664 /* Shared function expects packed array of only addresses. */
1665 mc_ptr = netdev->mc_list;
1667 for (i = 0; i < netdev->mc_count; i++) {
1670 memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
1672 mc_ptr = mc_ptr->next;
1675 ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
1678 ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
1683 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1686 struct ixgbe_q_vector *q_vector;
1687 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1689 /* legacy and MSI only use one vector */
1690 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1693 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1694 q_vector = &adapter->q_vector[q_idx];
1695 if (!q_vector->rxr_count)
1697 napi_enable(&q_vector->napi);
1701 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1704 struct ixgbe_q_vector *q_vector;
1705 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1707 /* legacy and MSI only use one vector */
1708 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1711 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1712 q_vector = &adapter->q_vector[q_idx];
1713 if (!q_vector->rxr_count)
1715 napi_disable(&q_vector->napi);
1719 static void ixgbe_configure(struct ixgbe_adapter *adapter)
1721 struct net_device *netdev = adapter->netdev;
1724 ixgbe_set_multi(netdev);
1726 ixgbe_restore_vlan(adapter);
1728 ixgbe_configure_tx(adapter);
1729 ixgbe_configure_rx(adapter);
1730 for (i = 0; i < adapter->num_rx_queues; i++)
1731 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
1732 (adapter->rx_ring[i].count - 1));
1735 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1737 struct net_device *netdev = adapter->netdev;
1738 struct ixgbe_hw *hw = &adapter->hw;
1740 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1741 u32 txdctl, rxdctl, mhadd;
1744 ixgbe_get_hw_control(adapter);
1746 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
1747 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
1748 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1749 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
1750 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
1755 /* XXX: to interrupt immediately for EICS writes, enable this */
1756 /* gpie |= IXGBE_GPIE_EIMEN; */
1757 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1760 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
1761 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
1762 * specifically only auto mask tx and rx interrupts */
1763 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1766 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1767 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
1768 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1769 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
1771 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1774 for (i = 0; i < adapter->num_tx_queues; i++) {
1775 j = adapter->tx_ring[i].reg_idx;
1776 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1777 txdctl |= IXGBE_TXDCTL_ENABLE;
1778 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1781 for (i = 0; i < adapter->num_rx_queues; i++) {
1782 j = adapter->rx_ring[i].reg_idx;
1783 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
1784 /* enable PTHRESH=32 descriptors (half the internal cache)
1785 * and HTHRESH=0 descriptors (to minimize latency on fetch),
1786 * this also removes a pesky rx_no_buffer_count increment */
1788 rxdctl |= IXGBE_RXDCTL_ENABLE;
1789 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
1791 /* enable all receives */
1792 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1793 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
1794 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
1796 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1797 ixgbe_configure_msix(adapter);
1799 ixgbe_configure_msi_and_legacy(adapter);
1801 clear_bit(__IXGBE_DOWN, &adapter->state);
1802 ixgbe_napi_enable_all(adapter);
1804 /* clear any pending interrupts, may auto mask */
1805 IXGBE_READ_REG(hw, IXGBE_EICR);
1807 ixgbe_irq_enable(adapter);
1809 /* bring the link up in the watchdog, this could race with our first
1810 * link up interrupt but shouldn't be a problem */
1811 mod_timer(&adapter->watchdog_timer, jiffies);
1815 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
1817 WARN_ON(in_interrupt());
1818 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1820 ixgbe_down(adapter);
1822 clear_bit(__IXGBE_RESETTING, &adapter->state);
1825 int ixgbe_up(struct ixgbe_adapter *adapter)
1827 /* hardware has been reset, we need to reload some things */
1828 ixgbe_configure(adapter);
1830 return ixgbe_up_complete(adapter);
1833 void ixgbe_reset(struct ixgbe_adapter *adapter)
1835 if (ixgbe_init_hw(&adapter->hw))
1836 DPRINTK(PROBE, ERR, "Hardware Error\n");
1838 /* reprogram the RAR[0] in case user changed it. */
1839 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1844 static int ixgbe_resume(struct pci_dev *pdev)
1846 struct net_device *netdev = pci_get_drvdata(pdev);
1847 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1850 pci_set_power_state(pdev, PCI_D0);
1851 pci_restore_state(pdev);
1852 err = pci_enable_device(pdev);
1854 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
1858 pci_set_master(pdev);
1860 pci_enable_wake(pdev, PCI_D3hot, 0);
1861 pci_enable_wake(pdev, PCI_D3cold, 0);
1863 if (netif_running(netdev)) {
1864 err = ixgbe_request_irq(adapter);
1869 ixgbe_reset(adapter);
1871 if (netif_running(netdev))
1874 netif_device_attach(netdev);
1881 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
1882 * @adapter: board private structure
1883 * @rx_ring: ring to free buffers from
1885 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1886 struct ixgbe_ring *rx_ring)
1888 struct pci_dev *pdev = adapter->pdev;
1892 /* Free all the Rx ring sk_buffs */
1894 for (i = 0; i < rx_ring->count; i++) {
1895 struct ixgbe_rx_buffer *rx_buffer_info;
1897 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1898 if (rx_buffer_info->dma) {
1899 pci_unmap_single(pdev, rx_buffer_info->dma,
1900 adapter->rx_buf_len,
1901 PCI_DMA_FROMDEVICE);
1902 rx_buffer_info->dma = 0;
1904 if (rx_buffer_info->skb) {
1905 dev_kfree_skb(rx_buffer_info->skb);
1906 rx_buffer_info->skb = NULL;
1908 if (!rx_buffer_info->page)
1910 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
1911 PCI_DMA_FROMDEVICE);
1912 rx_buffer_info->page_dma = 0;
1914 put_page(rx_buffer_info->page);
1915 rx_buffer_info->page = NULL;
1918 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
1919 memset(rx_ring->rx_buffer_info, 0, size);
1921 /* Zero out the descriptor ring */
1922 memset(rx_ring->desc, 0, rx_ring->size);
1924 rx_ring->next_to_clean = 0;
1925 rx_ring->next_to_use = 0;
1927 writel(0, adapter->hw.hw_addr + rx_ring->head);
1928 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1932 * ixgbe_clean_tx_ring - Free Tx Buffers
1933 * @adapter: board private structure
1934 * @tx_ring: ring to be cleaned
1936 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
1937 struct ixgbe_ring *tx_ring)
1939 struct ixgbe_tx_buffer *tx_buffer_info;
1943 /* Free all the Tx ring sk_buffs */
1945 for (i = 0; i < tx_ring->count; i++) {
1946 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1947 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1950 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
1951 memset(tx_ring->tx_buffer_info, 0, size);
1953 /* Zero out the descriptor ring */
1954 memset(tx_ring->desc, 0, tx_ring->size);
1956 tx_ring->next_to_use = 0;
1957 tx_ring->next_to_clean = 0;
1959 writel(0, adapter->hw.hw_addr + tx_ring->head);
1960 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1964 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
1965 * @adapter: board private structure
1967 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
1971 for (i = 0; i < adapter->num_rx_queues; i++)
1972 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1976 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
1977 * @adapter: board private structure
1979 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
1983 for (i = 0; i < adapter->num_tx_queues; i++)
1984 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1987 void ixgbe_down(struct ixgbe_adapter *adapter)
1989 struct net_device *netdev = adapter->netdev;
1992 /* signal that we are down to the interrupt handler */
1993 set_bit(__IXGBE_DOWN, &adapter->state);
1995 /* disable receives */
1996 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1997 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
1998 rxctrl & ~IXGBE_RXCTRL_RXEN);
2000 netif_tx_disable(netdev);
2002 /* disable transmits in the hardware */
2004 /* flush both disables */
2005 IXGBE_WRITE_FLUSH(&adapter->hw);
2008 ixgbe_irq_disable(adapter);
2010 ixgbe_napi_disable_all(adapter);
2011 del_timer_sync(&adapter->watchdog_timer);
2013 netif_carrier_off(netdev);
2014 netif_tx_stop_all_queues(netdev);
2016 if (!pci_channel_offline(adapter->pdev))
2017 ixgbe_reset(adapter);
2018 ixgbe_clean_all_tx_rings(adapter);
2019 ixgbe_clean_all_rx_rings(adapter);
2023 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
2025 struct net_device *netdev = pci_get_drvdata(pdev);
2026 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2031 netif_device_detach(netdev);
2033 if (netif_running(netdev)) {
2034 ixgbe_down(adapter);
2035 ixgbe_free_irq(adapter);
2039 retval = pci_save_state(pdev);
2044 pci_enable_wake(pdev, PCI_D3hot, 0);
2045 pci_enable_wake(pdev, PCI_D3cold, 0);
2047 ixgbe_release_hw_control(adapter);
2049 pci_disable_device(pdev);
2051 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2056 static void ixgbe_shutdown(struct pci_dev *pdev)
2058 ixgbe_suspend(pdev, PMSG_SUSPEND);
2062 * ixgbe_poll - NAPI Rx polling callback
2063 * @napi: structure for representing this polling device
2064 * @budget: how many packets driver is allowed to clean
2066 * This function is used for legacy and MSI, NAPI mode
2068 static int ixgbe_poll(struct napi_struct *napi, int budget)
2070 struct ixgbe_q_vector *q_vector = container_of(napi,
2071 struct ixgbe_q_vector, napi);
2072 struct ixgbe_adapter *adapter = q_vector->adapter;
2073 int tx_cleaned = 0, work_done = 0;
2076 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2077 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2078 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2082 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
2083 ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
2088 /* If budget not fully consumed, exit the polling mode */
2089 if (work_done < budget) {
2090 netif_rx_complete(adapter->netdev, napi);
2091 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
2092 ixgbe_set_itr(adapter);
2093 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2094 ixgbe_irq_enable(adapter);
2101 * ixgbe_tx_timeout - Respond to a Tx Hang
2102 * @netdev: network interface device structure
2104 static void ixgbe_tx_timeout(struct net_device *netdev)
2106 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2108 /* Do the reset outside of interrupt context */
2109 schedule_work(&adapter->reset_task);
static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}

static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 * 4) TCP Timer (optional)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		adapter->num_msix_vectors = vectors;
	}
}

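/*
 * pci_enable_msix() return contract relied on above: 0 means every
 * requested vector was granted, a negative value is a hard failure,
 * and a positive value is the number of vectors the platform could
 * actually provide, which is fed back into the next loop iteration.
 * E.g. a request for 18 vectors on a system that can grant 10 returns
 * 10, and the retry with 10 then succeeds.
 */
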
static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	int nrq = 1, ntq = 1;
	int feature_mask = 0, rss_i, rss_m;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		rss_m = 0;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;

		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED):
			rss_m = 0xF;
			nrq = rss_i;
			ntq = rss_i;
			break;
		default:
			rss_i = 0;
			rss_m = 0;
			break;
		}

		adapter->ring_feature[RING_F_RSS].indices = rss_i;
		adapter->ring_feature[RING_F_RSS].mask = rss_m;
		break;
	default:
		break;
	}

	adapter->num_rx_queues = nrq;
	adapter->num_tx_queues = ntq;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* TODO: Remove all uses of the indices in the cases where multiple
	 * features are OR'd together, if the feature set makes sense.
	 */
	int feature_mask = 0, rss_i;
	int i;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED):
			for (i = 0; i < adapter->num_rx_queues; i++)
				adapter->rx_ring[i].reg_idx = i;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_ring[i].reg_idx = i;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
		adapter->tx_ring[i].queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
		adapter->rx_ring[i].queue_index = i;
	}

	ixgbe_cache_ring_register(adapter);

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
						    *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
	 * we can easily reach upwards of 64 Rx descriptor queues and
	 * 32 Tx queues.  Thus, we cap it off in those rare cases where
	 * the cpu count also exceeds our vector limit.
	 */
	v_budget = min(v_budget, MAX_MSIX_COUNT);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		ixgbe_set_num_queues(adapter);
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		err = ixgbe_alloc_queues(adapter);
		if (err) {
			DPRINTK(PROBE, ERR, "Unable to allocate memory "
				"for queues\n");
			goto out;
		}

		goto try_msi;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbe_acquire_msix_vectors(adapter, v_budget);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		goto out;

try_msi:
	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
			"falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;

	return err;
}

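/*
 * Interrupt fallback ladder implemented above: MSI-X is tried first
 * (roughly one vector per queue plus the non-queue vectors); if the
 * entries cannot even be allocated, the driver collapses to a single
 * queue and tries MSI; if MSI also fails, err is reset to 0 and the
 * device runs with legacy INTx.  Only the MSI-X path keeps RSS and
 * multiple queues enabled.
 */
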
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
		"Tx Queue count = %u\n",
		(adapter->num_rx_queues > 1) ? "Enabled" :
		"Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_set_interrupt:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_alloc_queues:
	return err;
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;

	/* Enable Dynamic interrupt throttling by default */
	adapter->rx_eitr = 1;
	adapter->tx_eitr = 1;

	/* default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;

	/* select 10G link by default */
	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
	if (hw->mac.ops.reset(hw)) {
		dev_err(&pdev->dev, "HW Init failed\n");
		return -EIO;
	}
	if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
					 false)) {
		dev_err(&pdev->dev, "Link Speed setup failed\n");
		return -EIO;
	}

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
	txdr->tx_buffer_info = vmalloc(size);
	if (!txdr->tx_buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
		vfree(txdr->tx_buffer_info);
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the tx desc ring\n");
		return -ENOMEM;
	}

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	txdr->work_limit = txdr->count;

	return 0;
}

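/*
 * Illustrative sizing for the 4K rounding above: union ixgbe_adv_tx_desc
 * is 16 bytes, so a 512-entry ring needs 512 * 16 = 8192 bytes, which
 * ALIGN(, 4096) leaves unchanged, while a 100-entry ring (1600 bytes)
 * would be rounded up to one full 4K page.
 */
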
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
	rxdr->lro_mgr.lro_arr = vmalloc(size);
	if (!rxdr->lro_mgr.lro_arr)
		return -ENOMEM;
	memset(rxdr->lro_mgr.lro_arr, 0, size);

	size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
	rxdr->rx_buffer_info = vmalloc(size);
	if (!rxdr->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
			"vmalloc allocation failed for the rx desc ring\n");
		goto alloc_failed;
	}
	memset(rxdr->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the rx desc ring\n");
		vfree(rxdr->rx_buffer_info);
		goto alloc_failed;
	}

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;

alloc_failed:
	vfree(rxdr->lro_mgr.lro_arr);
	rxdr->lro_mgr.lro_arr = NULL;
	return -ENOMEM;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	vfree(rx_ring->lro_mgr.lro_arr);
	rx_ring->lro_mgr.lro_arr = NULL;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Rx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
	    (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
		netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

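/*
 * Frame-size arithmetic used above: max_frame = MTU + 14 bytes of
 * Ethernet header (ETH_HLEN) + 4 bytes of FCS (ETH_FCS_LEN).  A
 * standard 1500-byte MTU is therefore validated as a 1518-byte frame,
 * and the largest accepted MTU is IXGBE_MAX_JUMBO_FRAME_SIZE - 18.
 */
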
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
	ixgbe_free_all_rx_resources(adapter);
err_setup_rx:
	ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbe_reset(adapter);

	return err;
}

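/*
 * The error labels above unwind in strict reverse order of setup: a
 * failure in ixgbe_up_complete() releases firmware control and the
 * IRQ, then the Rx and finally the Tx resources are freed, mirroring
 * the allocation sequence.  Jumping to a label therefore releases
 * exactly what was already acquired, nothing more.
 */
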
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
				       adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}

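/*
 * Most registers read above are clear-on-read hardware counters: each
 * IXGBE_READ_REG() returns the delta since the last read, which is why
 * every read is accumulated with += and why a given counter must only
 * be read once per pass (reading RUC twice, for example, would count
 * undersize packets against two different update intervals).
 */
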
/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	bool link_up;
	u32 link_speed = 0;

	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
			u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
			DPRINTK(LINK, INFO, "NIC Link is Up %s, "
				"Flow Control: %s\n",
				(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
				 "10 Gbps" :
				 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
				  "1 Gbps" : "unknown speed")),
				((FLOW_RX && FLOW_TX) ? "RX/TX" :
				 (FLOW_RX ? "RX" :
				 (FLOW_TX ? "TX" : "None"))));

			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbe_update_stats(adapter);

	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		/* Cause software interrupt to ensure rx rings are cleaned */
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			u32 eics =
			 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
		} else {
			/* for legacy and MSI interrupts don't set any bits that
			 * are enabled for EIAM, because this operation would
			 * set *both* EIMS and EICS for any bit in EIAM */
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
				     (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		}
		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
	}
}

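/*
 * EICS mask example: NON_Q_VECTORS accounts for the non-queue MSI-X
 * vectors, so with, say, eight queue vectors the expression
 * (1 << 8) - 1 = 0xFF sets one software-interrupt bit per queue
 * vector, forcing every ring to be serviced even if a hardware
 * interrupt was lost.
 */
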
static int ixgbe_tso(struct ixgbe_adapter *adapter,
		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx |=
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

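/*
 * The checksum seeding above is the standard TSO handshake: the TCP
 * checksum is re-seeded with a pseudo-header sum computed over a zero
 * length field (csum_tcpudp_magic/csum_ipv6_magic with len == 0),
 * because the hardware replays the pseudo-header per segment and adds
 * each segment's true length as it splits the super-frame.
 */
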
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
			  struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
						"partial checksum but proto=%x!\n",
						skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;
		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = pci_map_single(adapter->pdev,
						     skb->data + offset,
						     size, PCI_DMA_TODEVICE);
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = pci_map_page(adapter->pdev,
							   frag->page,
							   offset,
							   size, PCI_DMA_TODEVICE);
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}

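/*
 * Chunking example for the loops above, assuming IXGBE_MAX_DATA_PER_TXD
 * is 16KB: a 40000-byte linear area maps as 16384 + 16384 + 7232 bytes,
 * i.e. three descriptors, and each page fragment is split the same way.
 * The returned count tells ixgbe_tx_queue() how many descriptors to
 * hand to the hardware.
 */
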
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
			   struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
						IXGBE_ADVTXD_POPTS_SHIFT;

		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
						IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
						IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}

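/*
 * The wmb() above pairs with the tail write: all descriptor stores
 * must be globally visible before the writel() to the ring's tail
 * register, since that MMIO write is what tells the DMA engine to
 * start fetching.  On strongly ordered x86 the barrier is nearly
 * free; on weakly ordered architectures it is essential.
 */
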
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
			       struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}

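/*
 * Stop-then-recheck pattern: the queue is stopped first and the free
 * descriptor count re-tested after the smp_mb(), so a concurrent
 * ixgbe_clean_tx_irq() either sees the stopped queue and wakes it, or
 * frees enough descriptors for the recheck here to wake it.  Either
 * way the queue cannot stall while room is available.
 */
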
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int len = skb->len;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	unsigned int mss = 0;
	int count = 0;
	unsigned int f;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	len -= skb->data_len;
	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	mss = skb_shinfo(skb)->gso_size;

	if (mss)
		count++;
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		count++;

	count += TXD_USE_COUNT(len);
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}
	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	first = tx_ring->next_to_use;
	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
		       ixgbe_tx_map(adapter, tx_ring, skb, first),
		       skb->len, hdr_len);

	netdev->trans_start = jiffies;

	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}

/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbe_intr(adapter->pdev->irq, netdev);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * ixgbe_napi_add_all - prep napi structs for use
 * @adapter: private struct
 *
 * helper function to napi_add each possible q_vector->napi
 **/
static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int (*poll)(struct napi_struct *, int);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		poll = &ixgbe_clean_rxonly;
	} else {
		poll = &ixgbe_poll;
		/* only one q_vector for legacy modes */
		q_vectors = 1;
	}

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       (*poll), 64);
	}
}

/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	static int cards_found;
	int i, err, pci_using_dac;
	u16 link_status, link_speed, link_width;
	u32 part_num;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->open = &ixgbe_open;
	netdev->stop = &ixgbe_close;
	netdev->hard_start_xmit = &ixgbe_xmit_frame;
	netdev->get_stats = &ixgbe_get_stats;
	netdev->set_multicast_list = &ixgbe_set_multi;
	netdev->set_mac_address = &ixgbe_set_mac;
	netdev->change_mtu = &ixgbe_change_mtu;
	ixgbe_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgbe_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->vlan_rx_register = ixgbe_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgbe_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	err = ii->get_invariants(hw);
	if (err)
		goto err_hw_init;

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_LRO;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);

	/* initialize default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* print bus type/speed/width info */
	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
		 "%02x:%02x:%02x:%02x:%02x:%02x\n",
		((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
		 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
		 "Unknown"),
		((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
		 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
		 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
		 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
		 "Unknown"),
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
	ixgbe_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		 hw->mac.type, hw->phy.type,
		 (part_num >> 8), (part_num & 0xff));

	if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
			 "this card is not sufficient for optimal "
			 "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
			 "PCI-Express slot is required.\n");
	}

	/* reset the hardware with the new settings */
	ixgbe_start_hw(hw);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	ixgbe_napi_add_all(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* always use CB2 mode, difference is masked
		 * in the CB driver */
		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
		ixgbe_setup_dca(adapter);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
	ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}
#endif

	unregister_netdev(netdev);

	ixgbe_reset_interrupt_capability(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	DPRINTK(PROBE, INFO, "complete\n");
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 **/
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 **/
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */