/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Xiong Huang <xiong_huang@attansic.com>
 * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
 * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA
 *
 * Chris Snook <csnook@redhat.com>
 * Jay Cliburn <jcliburn@gmail.com>
 *
 * This version is adapted from the Attansic reference driver for
 * inclusion in the Linux kernel. It is currently under heavy development.
 * A very incomplete list of things that need to be dealt with:
 *
 * Add more ethtool functions.
 * Fix abstruse irq enable/disable condition described here:
 *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
 *
 * interrupt coalescing
 */
#include <asm/atomic.h>
#include <asm/byteorder.h>

#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <net/checksum.h>
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
/*
 * atl1_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id atl1_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
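	/*
	 * "(x + 7) & ~7" below bumps the byte count up to the next
	 * multiple of 8, keeping the RX buffer length 8-byte aligned,
	 * which this hardware appears to expect.
	 */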
	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
	adapter->ict = 50000;		/* 100ms */
	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->rfd_fetch_gap = 1;
	hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
	hw->rx_jumbo_lkah = 1;
	hw->rrd_ret_timer = 16;
	hw->tpd_fetch_th = 16;
	hw->txf_burst = 0x100;
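	/*
	 * tx_jumbo_task_th is kept in 8-byte units, so the byte count is
	 * rounded up and divided by 8 (">> 3"), mirroring the "/ 8" used
	 * for rx_jumbo_th above.
	 */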
	hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
	hw->tpd_fetch_gap = 1;
	hw->rcb_value = atl1_rcb_64;
	hw->dma_ord = atl1_dma_ord_enh;
	hw->dmar_block = atl1_dma_req_256;
	hw->dmaw_block = atl1_dma_req_256;
	hw->cmb_rx_timer = 1;	/* about 2us */
	hw->cmb_tx_timer = 1;	/* about 2us */
	hw->smb_timer = 100000;	/* about 200ms */

	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->mb_lock);

	return 0;
}
static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	u16 result;

	atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);

	return result;
}

static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
	int val)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_write_phy_reg(&adapter->hw, reg_num, val);
}
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	int retval;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->lock, flags);
	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
	spin_unlock_irqrestore(&adapter->lock, flags);

	return retval;
}
/*
 * atl1_setup_ring_resources - allocate Tx / Rx descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;
	struct pci_dev *pdev = adapter->pdev;
	int size;
	u8 offset = 0;

	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
	if (unlikely(!tpd_ring->buffer_info)) {
		dev_err(&pdev->dev, "kzalloc failed, size = %d\n", size);
		return -ENOMEM;
	}
	rfd_ring->buffer_info =
		(struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
	/*
	 * real ring DMA buffer
	 * each ring/block may need up to 8 bytes for alignment, hence the
	 * additional 40 bytes tacked onto the end.
	 */
	ring_header->size = size =
		sizeof(struct tx_packet_desc) * tpd_ring->count
		+ sizeof(struct rx_free_desc) * rfd_ring->count
		+ sizeof(struct rx_return_desc) * rrd_ring->count
		+ sizeof(struct coals_msg_block)
		+ sizeof(struct stats_msg_block)
		+ 40;

	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
		&ring_header->dma);
	if (unlikely(!ring_header->desc)) {
		dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
		goto err_nomem;
	}

	memset(ring_header->desc, 0, ring_header->size);
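	/*
	 * The TPD, RFD and RRD rings plus the CMB and SMB blocks below are
	 * carved sequentially out of this one coherent allocation, each
	 * bumped up to the next 8-byte boundary; the spare bytes included
	 * in ring_header->size are what make these adjustments safe.
	 */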
	tpd_ring->dma = ring_header->dma;
	offset = (tpd_ring->dma & 0x7) ? (8 - (tpd_ring->dma & 0x7)) : 0;
	tpd_ring->dma += offset;
	tpd_ring->desc = (u8 *) ring_header->desc + offset;
	tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

	rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
	offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
	rfd_ring->dma += offset;
	rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
	rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;

	rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
	offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
	rrd_ring->dma += offset;
	rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
	rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;

	adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
	offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
	adapter->cmb.dma += offset;
	adapter->cmb.cmb = (struct coals_msg_block *)
		((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

	adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
	offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
	adapter->smb.dma += offset;
	adapter->smb.smb = (struct stats_msg_block *)
		((u8 *) adapter->cmb.cmb +
		(sizeof(struct coals_msg_block) + offset));
	return 0;

err_nomem:
	kfree(tpd_ring->buffer_info);
	return -ENOMEM;
}
static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}
/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rfd_ring->count; i++) {
		buffer_info = &rfd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * rfd_ring->count;
	memset(rfd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rfd_ring->desc, 0, rfd_ring->size);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}
/*
 * atl1_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 */
static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}
	}

	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->skb) {
			dev_kfree_skb_any(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * tpd_ring->count;
	memset(tpd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tpd_ring->desc, 0, tpd_ring->size);

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);
}
/*
 * atl1_free_ring_resources - Free Tx / Rx descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);

	kfree(tpd_ring->buffer_info);
	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
		ring_header->dma);

	tpd_ring->buffer_info = NULL;
	tpd_ring->desc = NULL;

	rfd_ring->buffer_info = NULL;
	rfd_ring->desc = NULL;

	rrd_ring->desc = NULL;
}
static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
	u32 value;
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;

	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;

	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
		MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
		MAC_CTRL_SPEED_SHIFT);

	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);

	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	/* preamble length */
	value |= (((u32) adapter->hw.preamble_len
		& MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);

	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;

	if (adapter->rx_csum)
		value |= MAC_CTRL_RX_CHKSUM_EN;

	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;
	/* value |= MAC_CTRL_LOOPBACK; */
	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must be read twice */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* link result is our setting */
	if (!reconfig) {
		if (adapter->link_speed != speed
		    || adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			dev_info(&adapter->pdev->dev,
				"%s link is up %d Mbps %s\n",
				netdev->name, adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
				MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 |
				MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return 0;
	}

	/* auto-neg, insert timer to re-config phy */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
	}

	return 0;
}
/*
 * atl1_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int old_mtu = netdev->mtu;
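	/* worst-case frame: MTU plus Ethernet header, FCS and a VLAN tag */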
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
		return -EINVAL;
	}

	adapter->hw.max_frame_size = max_frame;
	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
	adapter->rx_buffer_len = (max_frame + 7) & ~7;
	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;

	netdev->mtu = new_mtu;
	if ((old_mtu != new_mtu) && netif_running(netdev)) {
		atl1_down(adapter);
		atl1_up(adapter);
	}

	return 0;
}
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
	u32 hi, lo, value;

	/* RFD Flow Control */
	value = adapter->rfd_ring.count;
	hi = value / 16;
	if (hi < 2)
		hi = 2;
	lo = value * 7 / 8;

	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = adapter->rrd_ring.count;
	lo = value / 16;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
static void set_flow_ctrl_new(struct atl1_hw *hw)
{
	u32 hi, lo, value;

	/* RXF Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
	lo = value / 16;
	if (lo < 192)
		lo = 192;
	hi = value * 7 / 8;
	if (hi < lo)
		hi = lo + 16;
	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
	lo = value / 8;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	if (hi < lo)
		hi = lo + 3;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
/*
 * atl1_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx/Rx unit of the MAC after a reset.
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	u32 value;
	/* clear interrupt status */
	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

	/* set MAC Address */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) |
		(((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

	/* HI base address */
	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
	/* LO base address */
	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_SMB_ADDR_LO);

	/* element count */
	value = adapter->rrd_ring.count;
	value <<= 16;
	value += adapter->rfd_ring.count;
	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
		REG_DESC_TPD_RING_SIZE);

	/* Load Ptr */
	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

	/* config Mailbox */
	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
		& MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
		((atomic_read(&adapter->rrd_ring.next_to_clean)
		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		((atomic_read(&adapter->rfd_ring.next_to_use)
		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAILBOX);

	/* config IPG/IFG */
	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
		<< MAC_IPG_IFG_IPGT_SHIFT) |
		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
		<< MAC_IPG_IFG_MIFG_SHIFT) |
		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
		<< MAC_IPG_IFG_IPGR1_SHIFT) |
		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
		<< MAC_IPG_IFG_IPGR2_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

	/* config Half-Duplex Control */
	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

	/* set Interrupt Moderator Timer */
	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

	/* set Interrupt Clear Timer */
	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

	/* set max frame size hw will accept */
	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

	/* jumbo size & rrd retirement timer */
	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
		<< RXQ_JMBOSZ_TH_SHIFT) |
		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
		<< RXQ_JMBO_LKAH_SHIFT) |
		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
		<< RXQ_RRD_TIMER_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

	/* flow control */
	switch (hw->dev_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		set_flow_ctrl_old(adapter);
		break;
	default:
		set_flow_ctrl_new(hw);
		break;
	}

	/* config TXQ */
	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
		<< TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
		TXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

	/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
		<< TX_JUMBO_TASK_TH_SHIFT) |
		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
		<< TX_TPD_MIN_IPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

	/* config RXQ */
	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
		RXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

	/* config DMA Engine */
	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
		DMA_CTRL_DMAW_EN;
	value |= (u32) hw->dma_ord;
	if (atl1_rcb_128 == hw->rcb_value)
		value |= DMA_CTRL_RCB_VALUE;
	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

	/* config CMB / SMB */
	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
		hw->cmb_tpd : adapter->tpd_ring.count;
	value <<= 16;
	value |= hw->cmb_rrd;
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

	/* enable CMB / SMB */
	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

	value = ioread32(adapter->hw.hw_addr + REG_ISR);
	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
		value = 1;	/* config failed */
	else
		value = 0;

	/* clear all interrupt status */
	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
	return value;
}
/*
 * atl1_pcie_patch - Patch for PCIE module
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
	u32 value;

	/* much vendor magic here */
	value = 0x6500;
	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
	/* pcie flow control mode change */
	value = ioread32(adapter->hw.hw_addr + 0x1008);
	value |= 0x8000;
	iowrite32(value, adapter->hw.hw_addr + 0x1008);
}
/*
 * When resuming via ACPI on some VIA motherboards, the Interrupt Disable
 * bit (0x400) in the PCI Command register comes back set, so the device
 * cannot raise legacy INTx interrupts. This function clears that bit.
 * Brackett, 2006/03/15
 */
static void atl1_via_workaround(struct atl1_adapter *adapter)
{
	unsigned short value;

	value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
	if (value & PCI_COMMAND_INTX_DISABLE)
		value &= ~PCI_COMMAND_INTX_DISABLE;
	/* PCI_COMMAND is a 16-bit register, so use a 16-bit write */
	iowrite16(value, adapter->hw.hw_addr + PCI_COMMAND);
}
static void atl1_inc_smb(struct atl1_adapter *adapter)
{
	struct stats_msg_block *smb = adapter->smb.smb;

	/* Fill out the OS statistics structure */
	adapter->soft_stats.rx_packets += smb->rx_ok;
	adapter->soft_stats.tx_packets += smb->tx_ok;
	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
	adapter->soft_stats.multicast += smb->rx_mcast;
	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

	/* Rx Errors */
	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
		smb->rx_rrd_ov + smb->rx_align_err);
	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
		smb->rx_rxf_ov);

	adapter->soft_stats.rx_pause += smb->rx_pause;
	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

	/* Tx Errors */
	adapter->soft_stats.tx_errors += (smb->tx_late_col +
		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
	adapter->soft_stats.tx_window_errors += smb->tx_late_col;

	adapter->soft_stats.excecol += smb->tx_abort_col;
	adapter->soft_stats.deffer += smb->tx_defer;
	adapter->soft_stats.scc += smb->tx_1_col;
	adapter->soft_stats.mcc += smb->tx_2_col;
	adapter->soft_stats.latecol += smb->tx_late_col;
	adapter->soft_stats.tx_underun += smb->tx_underrun;
	adapter->soft_stats.tx_trunc += smb->tx_trunc;
	adapter->soft_stats.tx_pause += smb->tx_pause;

	adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
	adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
	adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
	adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
	adapter->net_stats.multicast = adapter->soft_stats.multicast;
	adapter->net_stats.collisions = adapter->soft_stats.collisions;
	adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
	adapter->net_stats.rx_over_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.rx_length_errors =
		adapter->soft_stats.rx_length_errors;
	adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
	adapter->net_stats.rx_frame_errors =
		adapter->soft_stats.rx_frame_errors;
	adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
	adapter->net_stats.rx_missed_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
	adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
	adapter->net_stats.tx_aborted_errors =
		adapter->soft_stats.tx_aborted_errors;
	adapter->net_stats.tx_window_errors =
		adapter->soft_stats.tx_window_errors;
	adapter->net_stats.tx_carrier_errors =
		adapter->soft_stats.tx_carrier_errors;
}
static void atl1_update_mailbox(struct atl1_adapter *adapter)
{
	unsigned long flags;
	u32 tpd_next_to_use;
	u32 rfd_next_to_use;
	u32 rrd_next_to_clean;
	u32 value;

	spin_lock_irqsave(&adapter->mb_lock, flags);

	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);

	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
		MB_RFD_PROD_INDX_SHIFT) |
		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
		MB_RRD_CONS_INDX_SHIFT) |
		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
		MB_TPD_PROD_INDX_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);

	spin_unlock_irqrestore(&adapter->mb_lock, flags);
}
static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, u16 offset)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;

	while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
		rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
		if (++rfd_ring->next_to_clean == rfd_ring->count) {
			rfd_ring->next_to_clean = 0;
		}
	}
}
static void atl1_update_rfd_index(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd)
{
	u16 num_buf;
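	/*
	 * Buffers the packet spanned: pkt_size over the per-buffer length,
	 * rounded up with the usual "(a + b - 1) / b" idiom.
	 */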
	num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
		adapter->rx_buffer_len;
	if (rrd->num_buf == num_buf)
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, num_buf);
}
static void atl1_rx_checksum(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, struct sk_buff *skb)
{
	struct pci_dev *pdev = adapter->pdev;

	skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
				ERR_FLAG_CODE | ERR_FLAG_OV)) {
			adapter->hw_csum_err++;
			dev_printk(KERN_DEBUG, &pdev->dev,
				"rx checksum error\n");
			return;
		}
	}

	if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
		/* checksum is invalid, but it's not an IPv4 pkt, so ok */
		return;

	/* IPv4 packet */
	if (likely(!(rrd->err_flg &
		(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_good++;
		return;
	}

	/* IPv4, but hardware thinks its checksum is wrong */
	dev_printk(KERN_DEBUG, &pdev->dev,
		"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
		rrd->pkt_flg, rrd->err_flg);
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
	adapter->hw_csum_err++;
}
/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct page *page;
	unsigned long offset;
	struct atl1_buffer *buffer_info, *next_info;
	struct sk_buff *skb;
	u16 num_alloc = 0;
	u16 rfd_next_to_use, next_next;
	struct rx_free_desc *rfd_desc;

	next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
	if (++next_next == rfd_ring->count)
		next_next = 0;
	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
	next_info = &rfd_ring->buffer_info[next_next];
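	/*
	 * Refill until we reach a descriptor whose buffer is still
	 * allocated; looking one slot ahead (next_info) stops the refill
	 * one entry early, so the producer never quite catches the
	 * consumer.
	 */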
	while (!buffer_info->alloced && !next_info->alloced) {
		if (buffer_info->skb) {
			buffer_info->alloced = 1;
			goto next;
		}

		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->net_stats.rx_dropped++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->alloced = 1;
		buffer_info->skb = skb;
		buffer_info->length = (u16) adapter->rx_buffer_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(pdev, page, offset,
			adapter->rx_buffer_len, PCI_DMA_FROMDEVICE);
		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
		rfd_desc->coalese = 0;

next:
		rfd_next_to_use = next_next;
		if (unlikely(++next_next == rfd_ring->count))
			next_next = 0;

		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
		next_info = &rfd_ring->buffer_info[next_next];
		num_alloc++;
	}

	if (num_alloc) {
		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
	}
	return num_alloc;
}
static void atl1_intr_rx(struct atl1_adapter *adapter)
{
	int i, count = 0;
	u16 length;
	u16 rrd_next_to_clean;
	u32 value;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct rx_return_desc *rrd;
	struct sk_buff *skb;

	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

	while (1) {
		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
		i = 1;
		if (likely(rrd->xsz.valid)) {	/* packet valid */
chk_rrd:
			/* check rrd status */
			if (likely(rrd->num_buf == 1))
				goto rrd_ok;

			/* rrd seems to be bad */
			if (unlikely(i-- > 0)) {
				/* rrd may not be DMAed completely */
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"incomplete RRD DMA transfer\n");
				udelay(1);
				goto chk_rrd;
			}
			/* bad rrd */
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"bad RRD\n");
			/* see if update RFD index */
			if (rrd->num_buf > 1)
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {	/* current rrd still not be updated */

			break;
		}

rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
				| ERR_FLAG_LEN))) {
				/* packet error, don't need upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			buffer_info->length, PCI_DMA_FROMDEVICE);
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		skb_put(skb, length - ETH_FCS_LEN);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
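			/*
			 * The RRD carries the 802.1Q TCI rotated left by
			 * four bits (VID in bits 15:4, priority in 2:0,
			 * CFI in bit 3); shuffle it back into standard
			 * TCI order before handing it to the stack.
			 */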
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
				((rrd->vlan_tag & 7) << 13) |
				((rrd->vlan_tag & 8) << 9);
			vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
		} else
			netif_rx(skb);

		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;

		adapter->netdev->last_rx = jiffies;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	atl1_alloc_rx_buffers(adapter);

	/* update mailbox ? */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;
		u32 rrd_next_to_clean;

		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use =
			atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
			atomic_read(&adapter->rrd_ring.next_to_clean);
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}
}
static void atl1_intr_tx(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 sw_tpd_next_to_clean;
	u16 cmb_tpd_next_to_clean;

	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
		struct tx_packet_desc *tpd;

		tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
		if (buffer_info->dma) {
			pci_unmap_page(adapter->pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb_irq(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		tpd->buffer_addr = 0;

		if (++sw_tpd_next_to_clean == tpd_ring->count)
			sw_tpd_next_to_clean = 0;
	}
	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

	if (netif_queue_stopped(adapter->netdev)
	    && netif_carrier_ok(adapter->netdev))
		netif_wake_queue(adapter->netdev);
}
static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
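	/*
	 * One slot is deliberately kept unused (the "- 1") so that a full
	 * ring is distinguishable from an empty one: next_to_use ==
	 * next_to_clean always means empty.
	 */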
	return ((next_to_clean > next_to_use) ?
		next_to_clean - next_to_use - 1 :
		tpd_ring->count + next_to_clean - next_to_use - 1);
}
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tso_param *tso)
{
	/* We enter this function holding a spinlock. */
	u8 ipofst;
	int err;

	if (skb_shinfo(skb)->gso_size) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(err))
				return err;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
				iph->daddr, 0, IPPROTO_TCP, 0);
			ipofst = skb_network_offset(skb);
			if (ipofst != ETH_HLEN) /* 802.3 frame */
				tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;

			tso->tsopl |= (iph->ihl &
				TSO_PARAM_IPHL_MASK) << TSO_PARAM_IPHL_SHIFT;
			tso->tsopl |= ((tcp_hdrlen(skb) >> 2) &
				TSO_PARAM_TCPHDRLEN_MASK) <<
				TSO_PARAM_TCPHDRLEN_SHIFT;
			tso->tsopl |= (skb_shinfo(skb)->gso_size &
				TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
			tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
			tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
			tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
			return 1;
		}
	}

	return 0;
}
static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct csum_param *csum)
{
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		cso = skb_transport_offset(skb);
		css = cso + skb->csum_offset;
		if (unlikely(cso & 0x1)) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"payload offset not an even number\n");
			return -1;
		}
		csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
			CSUM_PARAM_PLOADOFFSET_SHIFT;
		csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) <<
			CSUM_PARAM_XSUMOFFSET_SHIFT;
		csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT;
		return 1;
	}

	return 0;
}
static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
	bool tcp_seg)
{
	/* We enter this function holding a spinlock. */
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct page *page;
	int first_buf_len = skb->len;
	unsigned long offset;
	unsigned int nr_frags;
	unsigned int f;
	u16 tpd_next_to_use;
	u16 proto_hdr_len;
	u16 len12;
	u16 i, m;

	first_buf_len -= skb->data_len;
	nr_frags = skb_shinfo(skb)->nr_frags;
	tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
	buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
	if (unlikely(buffer_info->skb))
		BUG();
	/* put skb in last TPD */
	buffer_info->skb = NULL;

	if (tcp_seg) {
		/* TSO/GSO */
		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		buffer_info->length = proto_hdr_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, proto_hdr_len, PCI_DMA_TODEVICE);

		if (++tpd_next_to_use == tpd_ring->count)
			tpd_next_to_use = 0;

		if (first_buf_len > proto_hdr_len) {
			len12 = first_buf_len - proto_hdr_len;
			m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
			for (i = 0; i < m; i++) {
				buffer_info =
					&tpd_ring->buffer_info[tpd_next_to_use];
				buffer_info->skb = NULL;
				buffer_info->length =
					(ATL1_MAX_TX_BUF_LEN >=
					len12) ? ATL1_MAX_TX_BUF_LEN : len12;
				len12 -= buffer_info->length;
				page = virt_to_page(skb->data +
					(proto_hdr_len +
					i * ATL1_MAX_TX_BUF_LEN));
				offset = (unsigned long)(skb->data +
					(proto_hdr_len +
					i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK;
				buffer_info->dma = pci_map_page(adapter->pdev,
					page, offset, buffer_info->length,
					PCI_DMA_TODEVICE);
				if (++tpd_next_to_use == tpd_ring->count)
					tpd_next_to_use = 0;
			}
		}
	} else {
		/* not TSO/GSO */
		buffer_info->length = first_buf_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, first_buf_len, PCI_DMA_TODEVICE);
		if (++tpd_next_to_use == tpd_ring->count)
			tpd_next_to_use = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;
		u16 lenf;

		frag = &skb_shinfo(skb)->frags[f];
		lenf = frag->size;

		m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
		for (i = 0; i < m; i++) {
			buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
			if (unlikely(buffer_info->skb))
				BUG();
			buffer_info->skb = NULL;
			buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ?
				ATL1_MAX_TX_BUF_LEN : lenf;
			lenf -= buffer_info->length;
			buffer_info->dma = pci_map_page(adapter->pdev,
				frag->page,
				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
				buffer_info->length, PCI_DMA_TODEVICE);

			if (++tpd_next_to_use == tpd_ring->count)
				tpd_next_to_use = 0;
		}
	}

	/* last tpd's buffer-info */
	buffer_info->skb = skb;
}
static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
	union tpd_descr *descr)
{
	/* We enter this function holding a spinlock. */
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	int j;
	u32 val;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
		tpd->desc.csum.csumpu = descr->csum.csumpu;
		tpd->desc.csum.csumpl = descr->csum.csumpl;
		tpd->desc.tso.tsopu = descr->tso.tsopu;
		tpd->desc.tso.tsopl = descr->tso.tsopl;
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		tpd->desc.data = descr->data;
		tpd->desc.tso.tsopu |= (cpu_to_le16(buffer_info->length) &
			TSO_PARAM_BUFLEN_MASK) << TSO_PARAM_BUFLEN_SHIFT;

		val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
			TSO_PARAM_SEGMENT_MASK;
		if (val && !j)
			tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;

		if (j == (count - 1))
			tpd->desc.tso.tsopl |= 1 << TSO_PARAM_EOP_SHIFT;

		if (++tpd_next_to_use == tpd_ring->count)
			tpd_next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
}
static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int len = skb->len;
	int tso;
	int count = 1;
	int ret_val;
	u32 val;
	union tpd_descr param;
	u16 frag_size;
	u16 vlan_tag;
	unsigned long flags;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	unsigned int f;
	unsigned int proto_hdr_len;

	len -= skb->data_len;

	if (unlikely(skb->len == 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	param.data = 0;
	param.tso.tsopu = 0;
	param.tso.tsopl = 0;
	param.csum.csumpu = 0;
	param.csum.csumpl = 0;

	/* nr_frags will be nonzero if we're doing scatter/gather (SG) */
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_shinfo(skb)->frags[f].size;
		if (frag_size)
			count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
	}

	/* mss will be nonzero if we're doing segment offload (TSO/GSO) */
	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		if (skb->protocol == htons(ETH_P_IP)) {
			proto_hdr_len = (skb_transport_offset(skb) +
				tcp_hdrlen(skb));
			if (unlikely(proto_hdr_len > len)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			/* need additional TPD ? */
			if (proto_hdr_len != len)
				count += (len - proto_hdr_len +
					ATL1_MAX_TX_BUF_LEN - 1) /
					ATL1_MAX_TX_BUF_LEN;
		}
	}

	if (!spin_trylock_irqsave(&adapter->lock, flags)) {
		/* Can't get lock - tell upper layer to requeue */
		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
		return NETDEV_TX_LOCKED;
	}

	if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
		/* not enough descriptors */
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->lock, flags);
		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
		return NETDEV_TX_BUSY;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
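		/*
		 * The TPD wants the 802.1Q TCI rotated left by four bits
		 * (VID in 15:4, priority in 2:0, CFI in bit 3), the exact
		 * inverse of the shuffle done in atl1_intr_rx().
		 */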
		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
		param.tso.tsopl |= 1 << TSO_PARAM_INSVLAG_SHIFT;
		param.tso.tsopu |= (vlan_tag & TSO_PARAM_VLANTAG_MASK) <<
			TSO_PARAM_VLAN_SHIFT;
	}

	tso = atl1_tso(adapter, skb, &param.tso);
	if (tso < 0) {
		spin_unlock_irqrestore(&adapter->lock, flags);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!tso) {
		ret_val = atl1_tx_csum(adapter, skb, &param.csum);
		if (ret_val < 0) {
			spin_unlock_irqrestore(&adapter->lock, flags);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	val = (param.tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
		TSO_PARAM_SEGMENT_MASK;
	atl1_tx_map(adapter, skb, 1 == val);
	atl1_tx_queue(adapter, count, &param);
	netdev->trans_start = jiffies;
	spin_unlock_irqrestore(&adapter->lock, flags);
	atl1_update_mailbox(adapter);
	return NETDEV_TX_OK;
}
/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
	struct atl1_adapter *adapter = netdev_priv(data);
	u32 status;
	int max_ints = 10;

	status = adapter->cmb.cmb->int_stats;
	if (!status)
		return IRQ_NONE;

	do {
		/* clear CMB interrupt status at once */
		adapter->cmb.cmb->int_stats = 0;

		if (status & ISR_GPHY)	/* clear phy status */
			atlx_clear_phy_int(adapter);

		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

		/* check if SMB intr */
		if (status & ISR_SMB)
			atl1_inc_smb(adapter);

		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"pcie phy link down %x\n", status);
			if (netif_running(adapter->netdev)) {	/* reset MAC */
				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
				schedule_work(&adapter->pcie_dma_to_rst_task);
				return IRQ_HANDLED;
			}
		}

		/* check if DMA read/write error ? */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"pcie DMA r/w error (status = 0x%x)\n",
				status);
			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
			schedule_work(&adapter->pcie_dma_to_rst_task);
			return IRQ_HANDLED;
		}

		/* link event */
		if (status & ISR_GPHY) {
			adapter->soft_stats.tx_carrier_errors++;
			atl1_check_for_link(adapter);
		}

		/* transmit event */
		if (status & ISR_CMB_TX)
			atl1_intr_tx(adapter);

		/* rx exception */
		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
				ISR_HOST_RRD_OV))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"rx exception, ISR = 0x%x\n", status);
			atl1_intr_rx(adapter);
		}

		if (--max_ints < 0)
			break;

	} while ((status = adapter->cmb.cmb->int_stats));

	/* re-enable Interrupt */
	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
	return IRQ_HANDLED;
}
/*
 * atl1_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 */
static void atl1_watchdog(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
/*
 * atl1_phy_config - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 */
static void atl1_phy_config(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
	struct atl1_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->lock, flags);
	adapter->phy_timer_pending = false;
	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
	spin_unlock_irqrestore(&adapter->lock, flags);
}
/*
 * Orphaned vendor comment left intact here:
 * <vendor comment>
 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
 * will assert. We do soft reset <0x1400=1> according
 * with the SPEC. BUT, it seemes that PCIE or DMA
 * state-machine will not be reset. DMAR_TO_INT will
 * assert again and again.
 * </vendor comment>
 */
static void atl1_tx_timeout_task(struct work_struct *work)
{
	struct atl1_adapter *adapter =
		container_of(work, struct atl1_adapter, tx_timeout_task);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	atl1_down(adapter);
	atl1_up(adapter);
	netif_device_attach(netdev);
}
int atl1_reset(struct atl1_adapter *adapter)
{
	int ret;

	ret = atl1_reset_hw(&adapter->hw);
	if (ret)
		return ret;
	return atl1_init_hw(&adapter->hw);
}
s32 atl1_up(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int irq_flags = IRQF_SAMPLE_RANDOM;

	/* hardware has been reset, we need to reload some things */
	atlx_set_multi(netdev);
	atl1_init_ring_ptrs(adapter);
	atlx_restore_vlan(adapter);
	err = atl1_alloc_rx_buffers(adapter);
	if (unlikely(!err))
		/* no RX BUFFER allocated */
		return -ENOMEM;

	if (unlikely(atl1_configure(adapter))) {
		err = -EIO;
		goto err_up;
	}

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		dev_info(&adapter->pdev->dev,
			"Unable to enable MSI: %d\n", err);
		irq_flags |= IRQF_SHARED;
	}

	err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
		netdev->name, netdev);
	if (unlikely(err))
		goto err_up;

	mod_timer(&adapter->watchdog_timer, jiffies);
	atlx_irq_enable(adapter);
	atl1_check_link(adapter);
	return 0;

err_up:
	pci_disable_msi(adapter->pdev);
	/* free rx_buffers */
	atl1_clean_rx_ring(adapter);
	return err;
}
void atl1_down(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_config_timer);
	adapter->phy_timer_pending = false;

	atlx_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);
	atl1_reset_hw(&adapter->hw);
	adapter->cmb.cmb->int_stats = 0;

	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);
}
/*
 * atl1_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1_open(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = atl1_setup_ring_resources(adapter);
	if (err)
		return err;

	err = atl1_up(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	atl1_reset(adapter);
	return err;
}
/*
 * atl1_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1_close(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_down(adapter);
	atl1_free_ring_resources(adapter);
	return 0;
}
#ifdef CONFIG_PM
static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u32 ctrl = 0;
	u32 wufc = adapter->wol;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		atl1_down(adapter);

	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	if (ctrl & BMSR_LSTATUS)
		wufc &= ~ATLX_WUFC_LNKC;

	/* reduce speed to 10/100M */
	if (wufc) {
		atl1_phy_enter_power_saving(hw);
		/* if resume, let driver re-setup link */
		hw->phy_configured = false;
		atl1_set_mac_addr(hw);
		atlx_set_multi(netdev);

		ctrl = 0;
		/* turn on magic packet wol */
		if (wufc & ATLX_WUFC_MAG)
			ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;

		/* turn on Link change WOL */
		if (wufc & ATLX_WUFC_LNKC)
			ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);

		/* turn on all-multi mode if wake on multicast is enabled */
		ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
		ctrl &= ~MAC_CTRL_DBG;
		ctrl &= ~MAC_CTRL_PROMIS_EN;
		if (wufc & ATLX_WUFC_MC)
			ctrl |= MAC_CTRL_MC_ALL_EN;
		else
			ctrl &= ~MAC_CTRL_MC_ALL_EN;

		/* turn on broadcast mode if wake-on-BC is enabled */
		if (wufc & ATLX_WUFC_BC)
			ctrl |= MAC_CTRL_BC_EN;
		else
			ctrl &= ~MAC_CTRL_BC_EN;

		/* enable RX */
		ctrl |= MAC_CTRL_RX_EN;
		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);

	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
static int atl1_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* FIXME: check and handle */
	err = pci_enable_device(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
	atl1_reset(adapter);

	if (netif_running(netdev))
		atl1_up(adapter);
	netif_device_attach(netdev);

	atl1_via_workaround(adapter);

	return 0;
}
#else
#define atl1_suspend NULL
#define atl1_resume NULL
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void atl1_poll_controller(struct net_device *netdev)
{
	disable_irq(netdev->irq);
	atl1_intr(netdev->irq, netdev);
	enable_irq(netdev->irq);
}
#endif
1983 * atl1_probe - Device Initialization Routine
1984 * @pdev: PCI device information struct
1985 * @ent: entry in atl1_pci_tbl
1987 * Returns 0 on success, negative on failure
1989 * atl1_probe initializes an adapter identified by a pci_dev structure.
1990 * The OS initialization, configuring of the adapter private structure,
1991 * and a hardware reset occur.
1993 static int __devinit atl1_probe(struct pci_dev *pdev,
1994 const struct pci_device_id *ent)
1996 struct net_device *netdev;
1997 struct atl1_adapter *adapter;
1998 static int cards_found = 0;
2001 err = pci_enable_device(pdev);
2006 * The atl1 chip can DMA to 64-bit addresses, but it uses a single
2007 * shared register for the high 32 bits, so only a single, aligned,
2008 * 4 GB physical address range can be used at a time.
2010 * Supporting 64-bit DMA on this hardware is more trouble than it's
2011 * worth. It is far easier to limit to 32-bit DMA than update
2012 * various kernel subsystems to support the mechanics required by a
2013 * fixed-high-32-bit system.
2015 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2017 dev_err(&pdev->dev, "no usable DMA configuration\n");
2021 * Mark all PCI regions associated with PCI device
2022 * pdev as being reserved by owner atl1_driver_name
2024 err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
2026 goto err_request_regions;
2029 * Enables bus-mastering on the device and calls
2030 * pcibios_set_master to do the needed arch specific settings
2032 pci_set_master(pdev);
2034 netdev = alloc_etherdev(sizeof(struct atl1_adapter));
2037 goto err_alloc_etherdev;
2039 SET_NETDEV_DEV(netdev, &pdev->dev);
2041 pci_set_drvdata(pdev, netdev);
2042 adapter = netdev_priv(netdev);
2043 adapter->netdev = netdev;
2044 adapter->pdev = pdev;
2045 adapter->hw.back = adapter;
2047 adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
2048 if (!adapter->hw.hw_addr) {
2052 /* get device revision number */
2053 adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
2054 (REG_MASTER_CTRL + 2));
2055 dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
2057 /* set default ring resource counts */
2058 adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
2059 adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
2061 adapter->mii.dev = netdev;
2062 adapter->mii.mdio_read = mdio_read;
2063 adapter->mii.mdio_write = mdio_write;
2064 adapter->mii.phy_id_mask = 0x1f;
2065 adapter->mii.reg_num_mask = 0x1f;
2067 netdev->open = &atl1_open;
2068 netdev->stop = &atl1_close;
2069 netdev->hard_start_xmit = &atl1_xmit_frame;
2070 netdev->get_stats = &atlx_get_stats;
2071 netdev->set_multicast_list = &atlx_set_multi;
2072 netdev->set_mac_address = &atl1_set_mac;
2073 netdev->change_mtu = &atl1_change_mtu;
2074 netdev->do_ioctl = &atlx_ioctl;
2075 netdev->tx_timeout = &atlx_tx_timeout;
2076 netdev->watchdog_timeo = 5 * HZ;
2077 #ifdef CONFIG_NET_POLL_CONTROLLER
2078 netdev->poll_controller = atl1_poll_controller;
2080 netdev->vlan_rx_register = atlx_vlan_rx_register;
2082 netdev->ethtool_ops = &atl1_ethtool_ops;
2083 adapter->bd_number = cards_found;
	/* setup the private structure */
	err = atl1_sw_init(adapter);
	if (err)
		goto err_common;

	netdev->features = NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SG;
	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_LLTX;
	/*
	 * patch for some L1 of old version,
	 * the final version of L1 may not need these
	 * patches
	 */
	/* atl1_pcie_patch(adapter); */

	/* really reset GPHY core */
	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);

	/*
	 * reset the controller to
	 * put the device in a known good starting state
	 */
	if (atl1_reset_hw(&adapter->hw)) {
		err = -EIO;
		goto err_common;
	}

	/* copy the MAC address out of the EEPROM */
	atl1_read_mac_addr(&adapter->hw);
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_common;
	}

	atl1_check_options(adapter);

	/* pre-init the MAC, and setup link */
	err = atl1_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_common;
	}

	atl1_pcie_patch(adapter);
	/* assume we have no link for now */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &atl1_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	init_timer(&adapter->phy_config_timer);
	adapter->phy_config_timer.function = &atl1_phy_config;
	adapter->phy_config_timer.data = (unsigned long)adapter;
	adapter->phy_timer_pending = false;

	INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);

	INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);

	INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);

	err = register_netdev(netdev);
	if (err)
		goto err_common;

	cards_found++;
	atl1_via_workaround(adapter);
	return 0;
err_common:
	pci_iounmap(pdev, adapter->hw.hw_addr);
err_pci_iomap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_dma:
err_request_regions:
	pci_disable_device(pdev);
	return err;
}
/*
 * atl1_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit atl1_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter;
	/* Device not available. Return. */
	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	/*
	 * Some atl1 boards lack persistent storage for their MAC, and get it
	 * from the BIOS during POST.  If we've been messing with the MAC
	 * address, we need to save the permanent one.
	 */
	if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
		memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
			ETH_ALEN);
		atl1_set_mac_addr(&adapter->hw);
	}

	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
	unregister_netdev(netdev);
	pci_iounmap(pdev, adapter->hw.hw_addr);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
static struct pci_driver atl1_driver = {
	.name = ATLX_DRIVER_NAME,
	.id_table = atl1_pci_tbl,
	.probe = atl1_probe,
	.remove = __devexit_p(atl1_remove),
	.suspend = atl1_suspend,
	.resume = atl1_resume
};
/*
 * atl1_exit_module - Driver Exit Cleanup Routine
 *
 * atl1_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit atl1_exit_module(void)
{
	pci_unregister_driver(&atl1_driver);
}

/*
 * atl1_init_module - Driver Registration Routine
 *
 * atl1_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init atl1_init_module(void)
{
	return pci_register_driver(&atl1_driver);
}

module_init(atl1_init_module);
module_exit(atl1_exit_module);
struct atl1_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ATL1_STAT(m) \
	sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
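/*
 * For each table entry below, ATL1_STAT(m) expands to the member's byte
 * size and its offset within struct atl1_adapter, so each stat can be
 * fetched generically.  A rough sketch of how atl1_get_ethtool_stats
 * consumes one entry:
 *
 *	char *p = (char *)adapter + atl1_gstrings_stats[i].stat_offset;
 *	u64 v = (atl1_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
 *		*(u64 *)p : *(u32 *)p;
 */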
static struct atl1_stats atl1_gstrings_stats[] = {
	{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
	{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
	{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
	{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
	{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
	{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
	{"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
	{"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
	{"multicast", ATL1_STAT(soft_stats.multicast)},
	{"collisions", ATL1_STAT(soft_stats.collisions)},
	{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
	{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
	{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
	{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
	{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
	{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
	{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
	{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
	{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
	{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
	{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
	{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
	{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
	{"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
	{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
	{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
	{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
	{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
	{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
};
static void atl1_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int i;
	char *p;

	for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
		p = (char *)adapter + atl1_gstrings_stats[i].stat_offset;
		data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}
static int atl1_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(atl1_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static int atl1_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	ecmd->supported = (SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_1000baseT_Full |
			   SUPPORTED_Autoneg | SUPPORTED_TP);
	ecmd->advertising = ADVERTISED_TP;
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR)
			ecmd->advertising |=
				(ADVERTISED_10baseT_Half |
				 ADVERTISED_10baseT_Full |
				 ADVERTISED_100baseT_Half |
				 ADVERTISED_100baseT_Full |
				 ADVERTISED_1000baseT_Full);
		else
			ecmd->advertising |= (ADVERTISED_1000baseT_Full);
	}
	ecmd->port = PORT_TP;
	ecmd->phy_address = 0;
	ecmd->transceiver = XCVR_INTERNAL;

	if (netif_carrier_ok(adapter->netdev)) {
		u16 link_speed, link_duplex;
		atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
		ecmd->speed = link_speed;
		if (link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		ecmd->autoneg = AUTONEG_ENABLE;
	else
		ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
static int atl1_set_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u16 phy_data;
	int ret_val = 0;
	u16 old_media_type = hw->media_type;

	if (netif_running(adapter->netdev)) {
		dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
		atl1_down(adapter);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE)
		hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
	else {
		if (ecmd->speed == SPEED_1000) {
			if (ecmd->duplex != DUPLEX_FULL) {
				dev_warn(&adapter->pdev->dev,
					"can't force to 1000M half duplex\n");
				ret_val = -EINVAL;
				goto exit_sset;
			}
			hw->media_type = MEDIA_TYPE_1000M_FULL;
		} else if (ecmd->speed == SPEED_100) {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_100M_FULL;
			else
				hw->media_type = MEDIA_TYPE_100M_HALF;
		} else {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_10M_FULL;
			else
				hw->media_type = MEDIA_TYPE_10M_HALF;
		}
	}
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		ecmd->advertising =
		    ADVERTISED_10baseT_Half |
		    ADVERTISED_10baseT_Full |
		    ADVERTISED_100baseT_Half |
		    ADVERTISED_100baseT_Full |
		    ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	case MEDIA_TYPE_1000M_FULL:
		ecmd->advertising =
		    ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	default:
		ecmd->advertising = 0;
		break;
	}
	if (atl1_phy_setup_autoneg_adv(hw)) {
		ret_val = -EINVAL;
		dev_warn(&adapter->pdev->dev,
			"invalid ethtool speed/duplex setting\n");
		goto exit_sset;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
			    MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
		}
	}
	atl1_write_phy_reg(hw, MII_BMCR, phy_data);
exit_sset:
	if (ret_val)
		hw->media_type = old_media_type;

	if (netif_running(adapter->netdev)) {
		dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
		atl1_up(adapter);
	} else if (!ret_val) {
		dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
		atl1_reset(adapter);
	}
	return ret_val;
}
static void atl1_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}
static void atl1_get_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;
	if (adapter->wol & ATLX_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & ATLX_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & ATLX_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & ATLX_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
static int atl1_set_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;
	adapter->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= ATLX_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= ATLX_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= ATLX_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= ATLX_WUFC_MAG;
	return 0;
}
static int atl1_get_regs_len(struct net_device *netdev)
{
	return ATL1_REG_COUNT * sizeof(u32);
}
static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
	void *p)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	unsigned int i;
	u32 *regbuf = p;

	for (i = 0; i < ATL1_REG_COUNT; i++) {
		/*
		 * This switch statement avoids reserved regions
		 * of register space.
		 */
		switch (i) {
			/* reserved region; don't read it */
			regbuf[i] = 0;
			break;
		default:
			/* unreserved region */
			regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
		}
	}
}
static void atl1_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
	struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;

	ring->rx_max_pending = ATL1_MAX_RFD;
	ring->tx_max_pending = ATL1_MAX_TPD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rxdr->count;
	ring->tx_pending = txdr->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static int atl1_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
	struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
	struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;

	struct atl1_tpd_ring tpd_old, tpd_new;
	struct atl1_rfd_ring rfd_old, rfd_new;
	struct atl1_rrd_ring rrd_old, rrd_new;
	struct atl1_ring_header rhdr_old, rhdr_new;
	int err;

	tpd_old = adapter->tpd_ring;
	rfd_old = adapter->rfd_ring;
	rrd_old = adapter->rrd_ring;
	rhdr_old = adapter->ring_header;

	if (netif_running(adapter->netdev))
		atl1_down(adapter);

	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
	rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
			rfdr->count;
	rfdr->count = (rfdr->count + 3) & ~3;
	rrdr->count = rfdr->count;

	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
	tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
			tpdr->count;
	tpdr->count = (tpdr->count + 3) & ~3;
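	/*
	 * Worked example of the sizing above: a requested count of 101 is
	 * first clamped to [ATL1_MIN_*, ATL1_MAX_*], then (101 + 3) & ~3
	 * rounds it up to 104, the next multiple of 4.
	 */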
	if (netif_running(adapter->netdev)) {
		/* try to get new resources before deleting old */
		err = atl1_setup_ring_resources(adapter);
		if (err)
			goto err_setup_ring;

		/*
		 * save the new, restore the old in order to free it,
		 * then restore the new back again
		 */

		rfd_new = adapter->rfd_ring;
		rrd_new = adapter->rrd_ring;
		tpd_new = adapter->tpd_ring;
		rhdr_new = adapter->ring_header;
		adapter->rfd_ring = rfd_old;
		adapter->rrd_ring = rrd_old;
		adapter->tpd_ring = tpd_old;
		adapter->ring_header = rhdr_old;
		atl1_free_ring_resources(adapter);
		adapter->rfd_ring = rfd_new;
		adapter->rrd_ring = rrd_new;
		adapter->tpd_ring = tpd_new;
		adapter->ring_header = rhdr_new;

		err = atl1_up(adapter);
		if (err)
			return err;
	}
	return 0;

err_setup_ring:
	adapter->rfd_ring = rfd_old;
	adapter->rrd_ring = rrd_old;
	adapter->tpd_ring = tpd_old;
	adapter->ring_header = rhdr_old;
	atl1_up(adapter);
	return err;
}
static void atl1_get_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		epause->autoneg = AUTONEG_ENABLE;
	} else {
		epause->autoneg = AUTONEG_DISABLE;
	}
	epause->rx_pause = 1;
	epause->tx_pause = 1;
}
static int atl1_set_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		epause->autoneg = AUTONEG_ENABLE;
	} else {
		epause->autoneg = AUTONEG_DISABLE;
	}

	epause->rx_pause = 1;
	epause->tx_pause = 1;

	return 0;
}
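/*
 * Note: rx_pause and tx_pause are reported unconditionally above because
 * the PHY setup code fixes the flow control advertisement to "enable all"
 * (MII_AR_ASM_DIR | MII_AR_PAUSE in atl1_phy_setup_autoneg_adv below).
 */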
/* FIXME: is this right? -- CHS */
static u32 atl1_get_rx_csum(struct net_device *netdev)
{
	return 1;
}
static void atl1_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
			memcpy(p, atl1_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
static int atl1_nway_reset(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (netif_running(netdev)) {
		u16 phy_data;
		atl1_down(adapter);

		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
		    hw->media_type == MEDIA_TYPE_1000M_FULL) {
			phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
		} else {
			switch (hw->media_type) {
			case MEDIA_TYPE_100M_FULL:
				phy_data = MII_CR_FULL_DUPLEX |
					MII_CR_SPEED_100 | MII_CR_RESET;
				break;
			case MEDIA_TYPE_100M_HALF:
				phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
				break;
			case MEDIA_TYPE_10M_FULL:
				phy_data = MII_CR_FULL_DUPLEX |
					MII_CR_SPEED_10 | MII_CR_RESET;
				break;
			default:
				/* MEDIA_TYPE_10M_HALF */
				phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			}
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		atl1_up(adapter);
	}
	return 0;
}
const struct ethtool_ops atl1_ethtool_ops = {
	.get_settings		= atl1_get_settings,
	.set_settings		= atl1_set_settings,
	.get_drvinfo		= atl1_get_drvinfo,
	.get_wol		= atl1_get_wol,
	.set_wol		= atl1_set_wol,
	.get_regs_len		= atl1_get_regs_len,
	.get_regs		= atl1_get_regs,
	.get_ringparam		= atl1_get_ringparam,
	.set_ringparam		= atl1_set_ringparam,
	.get_pauseparam		= atl1_get_pauseparam,
	.set_pauseparam		= atl1_set_pauseparam,
	.get_rx_csum		= atl1_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.get_link		= ethtool_op_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= atl1_get_strings,
	.nway_reset		= atl1_nway_reset,
	.get_ethtool_stats	= atl1_get_ethtool_stats,
	.get_sset_count		= atl1_get_sset_count,
	.set_tso		= ethtool_op_set_tso,
};
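/*
 * These hooks back the ethtool(8) utility; for example, `ethtool -S ethX`
 * ends up in atl1_get_strings/atl1_get_ethtool_stats above, and
 * `ethtool -s ethX speed 100 duplex full autoneg off` lands in
 * atl1_set_settings.
 */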
/*
 * Reset the transmit and receive units; mask and clear all interrupts.
 * hw - Struct containing variables accessed by shared code
 * return : 0 or idle status (if error)
 */
s32 atl1_reset_hw(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	u32 icr;
	int i;

	/*
	 * Clear Interrupt mask to stop board from generating
	 * interrupts & Clear any pending interrupt events
	 */
	/*
	 * iowrite32(0, hw->hw_addr + REG_IMR);
	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
	 */

	/*
	 * Issue Soft Reset to the MAC.  This will reset the chip's
	 * transmit, receive, DMA.  It will not affect
	 * the current PCI configuration.  The global reset bit is self-
	 * clearing, and should clear within a microsecond.
	 */
	iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
	ioread32(hw->hw_addr + REG_MASTER_CTRL);

	iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
	ioread16(hw->hw_addr + REG_PHY_ENABLE);

	/* delay about 1ms */
	msleep(1);

	/* Wait at least 10 ms for all modules to be idle */
	for (i = 0; i < 10; i++) {
		icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
		if (!icr)
			break;
		/* delay 1 ms */
		msleep(1);
		/* FIXME: still the right way to do this? */
		cpu_relax();
	}

	if (icr) {
		dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
		return icr;
	}

	return 0;
}
/*
 * Functions for EEPROM access
 *
 * atl1_check_eeprom_exist
 * returns 0 if an EEPROM is present
 */
static int atl1_check_eeprom_exist(struct atl1_hw *hw)
{
	u32 value;

	value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
	if (value & SPI_FLASH_CTRL_EN_VPD) {
		value &= ~SPI_FLASH_CTRL_EN_VPD;
		iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
	}

	value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
}
static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
{
	int i;
	u32 control;

	if (offset & 3)
		/* address is not dword-aligned */
		return false;

	iowrite32(0, hw->hw_addr + REG_VPD_DATA);
	control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
	iowrite32(control, hw->hw_addr + REG_VPD_CAP);
	ioread32(hw->hw_addr + REG_VPD_CAP);

	for (i = 0; i < 10; i++) {
		msleep(2);
		control = ioread32(hw->hw_addr + REG_VPD_CAP);
		if (control & VPD_CAP_VPD_FLAG)
			break;
	}
	if (control & VPD_CAP_VPD_FLAG) {
		*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
		return true;
	}
	/* timeout */
	return false;
}
/*
 * Reads the value from a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to read
 */
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
	u32 val;
	int i;

	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
		MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}
	if (!(val & (MDIO_START | MDIO_BUSY))) {
		*phy_data = (u16) val;
		return 0;
	}
	return ATLX_ERR_PHY;
}
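/*
 * Hypothetical usage sketch (not taken from this driver): polling link
 * status through the standard MII BMSR register with the accessor above;
 * link_up is an assumed local variable.
 *
 *	u16 bmsr;
 *	if (!atl1_read_phy_reg(hw, MII_BMSR, &bmsr))
 *		link_up = bmsr & BMSR_LSTATUS;
 */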
#define CUSTOM_SPI_CS_SETUP	2
#define CUSTOM_SPI_CLK_HI	2
#define CUSTOM_SPI_CLK_LO	2
#define CUSTOM_SPI_CS_HOLD	2
#define CUSTOM_SPI_CS_HI	3

static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
{
	int i;
	u32 value;

	iowrite32(0, hw->hw_addr + REG_SPI_DATA);
	iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);

	value = SPI_FLASH_CTRL_WAIT_READY |
	    (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
	    SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
	    SPI_FLASH_CTRL_CLK_HI_MASK) <<
	    SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
	    SPI_FLASH_CTRL_CLK_LO_MASK) <<
	    SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
	    SPI_FLASH_CTRL_CS_HOLD_MASK) <<
	    SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
	    SPI_FLASH_CTRL_CS_HI_MASK) <<
	    SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
	    SPI_FLASH_CTRL_INS_SHIFT;

	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);

	value |= SPI_FLASH_CTRL_START;
	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
	ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);

	for (i = 0; i < 10; i++) {
		msleep(1);
		value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
		if (!(value & SPI_FLASH_CTRL_START))
			break;
	}

	if (value & SPI_FLASH_CTRL_START)
		return false;

	*buf = ioread32(hw->hw_addr + REG_SPI_DATA);

	return true;
}
/*
 * atl1_get_permanent_address
 * returns 0 if a valid MAC address was fetched
 */
static int atl1_get_permanent_address(struct atl1_hw *hw)
{
	u32 addr[2];
	u32 i, control;
	u16 reg;
	u8 eth_addr[ETH_ALEN];
	bool key_valid;

	if (is_valid_ether_addr(hw->perm_mac_addr))
		return 0;

	/* init */
	addr[0] = addr[1] = 0;

	if (!atl1_check_eeprom_exist(hw)) {
		reg = 0;
		key_valid = false;
		/* Read out all EEPROM content */
		i = 0;
		while (1) {
			if (atl1_read_eeprom(hw, i + 0x100, &control)) {
				if (key_valid) {
					if (reg == REG_MAC_STA_ADDR)
						addr[0] = control;
					else if (reg ==
						(REG_MAC_STA_ADDR + 4))
						addr[1] = control;
					key_valid = false;
				} else if ((control & 0xff) == 0x5A) {
					key_valid = true;
					reg = (u16) (control >> 16);
				} else
					break;
			} else
				/* read error */
				break;
			i += 4;
		}

		*(u32 *) &eth_addr[2] = swab32(addr[0]);
		*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
		if (is_valid_ether_addr(eth_addr)) {
			memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
			return 0;
		}
		return 1;
	}

	/* see if a SPI flash exists */
	addr[0] = addr[1] = 0;
	reg = 0;
	key_valid = false;
	i = 0;
	while (1) {
		if (atl1_spi_read(hw, i + 0x1f000, &control)) {
			if (key_valid) {
				if (reg == REG_MAC_STA_ADDR)
					addr[0] = control;
				else if (reg == (REG_MAC_STA_ADDR + 4))
					addr[1] = control;
				key_valid = false;
			} else if ((control & 0xff) == 0x5A) {
				key_valid = true;
				reg = (u16) (control >> 16);
			} else
				break;
		} else
			/* read error */
			break;
		i += 4;
	}

	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	/*
	 * On some motherboards, the MAC address is written by the
	 * BIOS directly to the MAC register during POST, and is
	 * not stored in eeprom. If all else thus far has failed
	 * to fetch the permanent MAC address, try reading it directly.
	 */
	addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
	addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	return 1;
}
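/*
 * Byte-order example for the reassembly above, assuming a little-endian
 * host: with addr[0] = 0x6AF600DC and addr[1] = 0x000B (the layout
 * atl1_set_mac_addr writes), swab32/swab16 reconstruct the MAC
 * 00-0B-6A-F6-00-DC in eth_addr[0..5].
 */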
/*
 * Reads the adapter's MAC address from the EEPROM
 * hw - Struct containing variables accessed by shared code
 */
s32 atl1_read_mac_addr(struct atl1_hw *hw)
{
	u16 i;

	if (atl1_get_permanent_address(hw))
		random_ether_addr(hw->perm_mac_addr);

	for (i = 0; i < ETH_ALEN; i++)
		hw->mac_addr[i] = hw->perm_mac_addr[i];
	return 0;
}
/*
 * Hashes an address to determine its location in the multicast table
 * hw - Struct containing variables accessed by shared code
 * mc_addr - the multicast address to hash
 *
 * atl1_hash_mc_addr
 * purpose
 *	set hash value for a multicast address
 *	hash calculation:
 *	1. calculate a 32-bit CRC of the multicast address
 *	2. reverse the bit order of the CRC (MSB becomes LSB)
 */
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
	u32 crc32, value = 0;
	int i;

	crc32 = ether_crc_le(6, mc_addr);
	for (i = 0; i < 32; i++)
		value |= (((crc32 >> i) & 1) << (31 - i));

	return value;
}
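/*
 * The explicit loop above is just a 32-bit bit reversal; assuming
 * <linux/bitrev.h> is available, an equivalent one-liner would be:
 *
 *	return bitrev32(ether_crc_le(ETH_ALEN, mc_addr));
 */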
/*
 * Sets the bit in the multicast table corresponding to the hash value.
 * hw - Struct containing variables accessed by shared code
 * hash_value - Multicast address hash value
 */
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg;
	u32 mta;

	/*
	 * The HASH Table is a register array of 2 32-bit registers.
	 * It is treated like an array of 64 bits.  We want to set
	 * bit BitArray[hash_value]. So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The register is determined by the
	 * most significant bit of the hash value (bit 31), and the
	 * bit within that register by bits 30:26 of the value.
	 */
	hash_reg = (hash_value >> 31) & 0x1;
	hash_bit = (hash_value >> 26) & 0x1F;
	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
	mta |= (1 << hash_bit);
	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}
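/*
 * Typical usage from a set_multicast_list handler (sketch; hash_value is
 * an assumed local variable):
 *
 *	hash_value = atl1_hash_mc_addr(hw, mc_addr);
 *	atl1_hash_set(hw, hash_value);
 */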
/*
 * Writes a value to a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to write
 * data - data to write to the PHY
 */
s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
{
	int i;
	u32 val;

	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
	    (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
	    MDIO_SUP_PREAMBLE |
	    MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}

	if (!(val & (MDIO_START | MDIO_BUSY)))
		return 0;

	return ATLX_ERR_PHY;
}
/*
 * Forces L001's PHY out of its power-saving state (bug workaround)
 * hw - Struct containing variables accessed by shared code
 * At power-on, L001's PHY is always in the power-saving state
 * (gigabit link forbidden)
 */
static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
{
	s32 ret;

	ret = atl1_write_phy_reg(hw, 29, 0x0029);
	if (ret)
		return ret;
	return atl1_write_phy_reg(hw, 30, 0);
}
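/*
 * Registers 29 and 30 appear to be the PHY's vendor-specific debug-port
 * address and data registers (an assumption based on the access pattern):
 * the sequence above selects debug register 0x29 and clears it.
 */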
/*
 * TODO: do something or get rid of this
 */
s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
{
/*
	ret_val = atl1_write_phy_reg(hw, ...);
	ret_val = atl1_write_phy_reg(hw, ...);
*/
	return 0;
}
/*
 * Resets the PHY and makes the configuration take effect
 * hw - Struct containing variables accessed by shared code
 *
 * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
 */
static s32 atl1_phy_reset(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	s32 ret_val;
	u16 phy_data;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
			    MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
		}
	}

	ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
	if (ret_val) {
		u32 val;
		int i;

		/* pcie serdes link may be down! */
		dev_dbg(&pdev->dev, "pcie phy link down\n");

		for (i = 0; i < 25; i++) {
			msleep(1);
			val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
			if (!(val & (MDIO_START | MDIO_BUSY)))
				break;
		}

		if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
			dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
			return ret_val;
		}
	}
	return 0;
}
/*
 * Configures PHY autoneg and flow control advertisement settings
 * hw - Struct containing variables accessed by shared code
 */
s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
{
	s32 ret_val;
	s16 mii_autoneg_adv_reg;
	s16 mii_1000t_ctrl_reg;

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;

	/* Read the MII 1000Base-T Control Register (Address 9). */
	mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;

	/*
	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the 1000Base-T Control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
	mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;

	/*
	 * Need to parse media_type and set up
	 * the appropriate PHY registers.
	 */
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
					MII_AR_10T_FD_CAPS |
					MII_AR_100TX_HD_CAPS |
					MII_AR_100TX_FD_CAPS);
		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
		break;

	case MEDIA_TYPE_1000M_FULL:
		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
		break;

	case MEDIA_TYPE_100M_FULL:
		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
		break;

	case MEDIA_TYPE_100M_HALF:
		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
		break;

	case MEDIA_TYPE_10M_FULL:
		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
		break;

	default:
		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
		break;
	}

	/* flow control fixed to enable all */
	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);

	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;

	ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
	if (ret_val)
		return ret_val;

	return 0;
}
/*
 * Configures link settings.
 * hw - Struct containing variables accessed by shared code
 * Assumes the hardware has previously been reset and the
 * transmitter and receiver are not enabled.
 */
static s32 atl1_setup_link(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	s32 ret_val;

	/*
	 * Options:
	 * The PHY will advertise the value(s) parsed from
	 * autoneg_advertised and fc; regardless of the autoneg
	 * setting, we do not wait for the link result.
	 */
	ret_val = atl1_phy_setup_autoneg_adv(hw);
	if (ret_val) {
		dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
		return ret_val;
	}
	/* software reset, and enable autoneg if needed */
	ret_val = atl1_phy_reset(hw);
	if (ret_val) {
		dev_dbg(&pdev->dev, "error resetting phy\n");
		return ret_val;
	}
	hw->phy_configured = true;
	return ret_val;
}
static void atl1_init_flash_opcode(struct atl1_hw *hw)
{
	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
		/* unknown vendor, fall back to the default */
		hw->flash_vendor = 0;

	/* Init OP table */
	iowrite8(flash_table[hw->flash_vendor].cmd_program,
		hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
	iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
		hw->hw_addr + REG_SPI_FLASH_OP_RDID);
	iowrite8(flash_table[hw->flash_vendor].cmd_wren,
		hw->hw_addr + REG_SPI_FLASH_OP_WREN);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
		hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
		hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_read,
		hw->hw_addr + REG_SPI_FLASH_OP_READ);
}
/*
 * Performs basic configuration of the adapter.
 * hw - Struct containing variables accessed by shared code
 * Assumes that the controller has previously been reset and is in a
 * post-reset uninitialized state. Initializes the multicast table,
 * and calls routines to set up the link.
 * Leaves the transmit and receive units disabled and uninitialized.
 */
s32 atl1_init_hw(struct atl1_hw *hw)
{
	u32 ret_val = 0;

	/* Zero out the Multicast HASH table */
	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
	/* clear the old settings from the multicast hash table */
	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));

	atl1_init_flash_opcode(hw);

	if (!hw->phy_configured) {
		/* enable GPHY LinkChange Interrupt */
		ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
		if (ret_val)
			return ret_val;
		/* make PHY out of power-saving state */
		ret_val = atl1_phy_leave_power_saving(hw);
		if (ret_val)
			return ret_val;
		/* Call a subroutine to configure the link */
		ret_val = atl1_setup_link(hw);
	}
	return ret_val;
}
/*
 * Detects the current speed and duplex settings of the hardware.
 * hw - Struct containing variables accessed by shared code
 * speed - Speed of the connection
 * duplex - Duplex setting of the connection
 */
s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
{
	struct pci_dev *pdev = hw->back->pdev;
	s32 ret_val;
	u16 phy_data;

	/* Read the PHY Specific Status Register (17) */
	ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
	if (ret_val)
		return ret_val;

	if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
		return ATLX_ERR_PHY_RES;

	switch (phy_data & MII_ATLX_PSSR_SPEED) {
	case MII_ATLX_PSSR_1000MBS:
		*speed = SPEED_1000;
		break;
	case MII_ATLX_PSSR_100MBS:
		*speed = SPEED_100;
		break;
	case MII_ATLX_PSSR_10MBS:
		*speed = SPEED_10;
		break;
	default:
		dev_dbg(&pdev->dev, "error getting speed\n");
		return ATLX_ERR_PHY_SPEED;
	}
	if (phy_data & MII_ATLX_PSSR_DPLX)
		*duplex = FULL_DUPLEX;
	else
		*duplex = HALF_DUPLEX;

	return 0;
}
void atl1_set_mac_addr(struct atl1_hw *hw)
{
	u32 value;
	/*
	 * e.g. for the MAC 00-0B-6A-F6-00-DC:
	 * reg 0: 6AF600DC   reg 1: 000B
	 * low dword
	 */
	value = (((u32) hw->mac_addr[2]) << 24) |
	    (((u32) hw->mac_addr[3]) << 16) |
	    (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	/* high word */
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
}