/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Xiong Huang <xiong_huang@attansic.com>
 * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
 * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA
 *
 * Chris Snook <csnook@redhat.com>
 * Jay Cliburn <jcliburn@gmail.com>
 *
 * This version is adapted from the Attansic reference driver for
 * inclusion in the Linux kernel.  It is currently under heavy development.
 * A very incomplete list of things that need to be dealt with:
 *
 * TODO:
 * Wake on LAN.
 * Add more ethtool functions.
 * Fix abstruse irq enable/disable condition described here:
 *      http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
 *
 * NEEDS TESTING:
 * VLAN
 * multicast
 * promiscuous mode
 * interrupt coalescing
 * SMP torture testing
 */

#include <asm/atomic.h>
#include <asm/byteorder.h>

#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/irqflags.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include "atl1.h"

/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"

/*
 * atl1_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id atl1_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
        /* required last entry */
        {0,}
};
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);

/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

        adapter->wol = 0;
        adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
        adapter->ict = 50000;           /* 100ms */
        adapter->link_speed = SPEED_0;  /* hardware init */
        adapter->link_duplex = FULL_DUPLEX;

        hw->phy_configured = false;
        hw->preamble_len = 7;
        hw->ipgt = 0x60;
        hw->min_ifg = 0x50;
        hw->ipgr1 = 0x40;
        hw->ipgr2 = 0x60;
        hw->max_retry = 0xf;
        hw->lcol = 0x37;
        hw->jam_ipg = 7;
        hw->rfd_burst = 8;
        hw->rrd_burst = 8;
        hw->rfd_fetch_gap = 1;
        hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
        hw->rx_jumbo_lkah = 1;
        hw->rrd_ret_timer = 16;
        hw->tpd_burst = 4;
        hw->tpd_fetch_th = 16;
        hw->txf_burst = 0x100;
        hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
        hw->tpd_fetch_gap = 1;
        hw->rcb_value = atl1_rcb_64;
        hw->dma_ord = atl1_dma_ord_enh;
        hw->dmar_block = atl1_dma_req_256;
        hw->dmaw_block = atl1_dma_req_256;
        hw->cmb_rrd = 4;
        hw->cmb_tpd = 4;
        hw->cmb_rx_timer = 1;   /* about 2us */
        hw->cmb_tx_timer = 1;   /* about 2us */
        hw->smb_timer = 100000; /* about 200ms */

        spin_lock_init(&adapter->lock);
        spin_lock_init(&adapter->mb_lock);

        return 0;
}

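/*
 * MDIO callbacks for the generic MII layer (adapter->mii).  The phy_id
 * argument is unused: the driver only ever talks to the chip's single
 * PHY, and reads mask the register number down to the valid 0-31 range.
 */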
static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        u16 result;

        atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);

        return result;
}

static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
        int val)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);

        atl1_write_phy_reg(&adapter->hw, reg_num, val);
}

/*
 * atl1_mii_ioctl - MII ioctl handler
 * @netdev: network interface device structure
 * @ifr: pointer to the ioctl request
 * @cmd: ioctl command
 */
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
        int retval;

        if (!netif_running(netdev))
                return -EINVAL;

        spin_lock_irqsave(&adapter->lock, flags);
        retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
        spin_unlock_irqrestore(&adapter->lock, flags);

        return retval;
}

/*
 * atl1_setup_ring_resources - allocate Tx / Rx descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_ring_header *ring_header = &adapter->ring_header;
        struct pci_dev *pdev = adapter->pdev;
        int size;
        u8 offset = 0;

        size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
        tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
        if (unlikely(!tpd_ring->buffer_info)) {
                dev_err(&pdev->dev, "kzalloc failed, size = %d\n", size);
                goto err_nomem;
        }
        rfd_ring->buffer_info =
                (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);

        /*
         * real ring DMA buffer
         * each of the five rings/blocks (TPD, RFD, RRD, CMB, SMB) may need
         * up to 8 bytes for 8-byte alignment, hence the additional 40 bytes
         * tacked onto the end.
         */
        ring_header->size = size =
                sizeof(struct tx_packet_desc) * tpd_ring->count
                + sizeof(struct rx_free_desc) * rfd_ring->count
                + sizeof(struct rx_return_desc) * rrd_ring->count
                + sizeof(struct coals_msg_block)
                + sizeof(struct stats_msg_block)
                + 40;
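
        /*
         * Layout of this one coherent allocation, each block aligned up
         * to the next 8-byte boundary:
         *
         *   [TPD ring][RFD ring][RRD ring][CMB][SMB][slack]
         */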

        ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
                &ring_header->dma);
        if (unlikely(!ring_header->desc)) {
                dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
                goto err_nomem;
        }

        memset(ring_header->desc, 0, ring_header->size);

        /* init TPD ring */
        tpd_ring->dma = ring_header->dma;
        offset = (tpd_ring->dma & 0x7) ? (8 - (tpd_ring->dma & 0x7)) : 0;
        tpd_ring->dma += offset;
        tpd_ring->desc = (u8 *) ring_header->desc + offset;
        tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

        /* init RFD ring */
        rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
        offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
        rfd_ring->dma += offset;
        rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
        rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;

        /* init RRD ring */
        rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
        offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
        rrd_ring->dma += offset;
        rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
        rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;

        /* init CMB */
        adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
        offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
        adapter->cmb.dma += offset;
        adapter->cmb.cmb = (struct coals_msg_block *)
                ((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

        /* init SMB */
        adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
        offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
        adapter->smb.dma += offset;
        adapter->smb.smb = (struct stats_msg_block *)
                ((u8 *) adapter->cmb.cmb +
                (sizeof(struct coals_msg_block) + offset));

        return 0;

err_nomem:
        kfree(tpd_ring->buffer_info);
        return -ENOMEM;
}

static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

        atomic_set(&tpd_ring->next_to_use, 0);
        atomic_set(&tpd_ring->next_to_clean, 0);

        rfd_ring->next_to_clean = 0;
        atomic_set(&rfd_ring->next_to_use, 0);

        rrd_ring->next_to_use = 0;
        atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rfd_ring->count; i++) {
                buffer_info = &rfd_ring->buffer_info[i];
                if (buffer_info->dma) {
                        pci_unmap_page(pdev, buffer_info->dma,
                                buffer_info->length, PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
                }
                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct atl1_buffer) * rfd_ring->count;
        memset(rfd_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rfd_ring->desc, 0, rfd_ring->size);

        rfd_ring->next_to_clean = 0;
        atomic_set(&rfd_ring->next_to_use, 0);

        rrd_ring->next_to_use = 0;
        atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 */
static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Unmap all the Tx ring DMA buffers */
        for (i = 0; i < tpd_ring->count; i++) {
                buffer_info = &tpd_ring->buffer_info[i];
                if (buffer_info->dma) {
                        pci_unmap_page(pdev, buffer_info->dma,
                                buffer_info->length, PCI_DMA_TODEVICE);
                        buffer_info->dma = 0;
                }
        }

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tpd_ring->count; i++) {
                buffer_info = &tpd_ring->buffer_info[i];
                if (buffer_info->skb) {
                        dev_kfree_skb_any(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct atl1_buffer) * tpd_ring->count;
        memset(tpd_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(tpd_ring->desc, 0, tpd_ring->size);

        atomic_set(&tpd_ring->next_to_use, 0);
        atomic_set(&tpd_ring->next_to_clean, 0);
}

/*
 * atl1_free_ring_resources - Free Tx / Rx descriptor resources
 * @adapter: board private structure
 *
 * Free all transmit and receive software resources
 */
void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_ring_header *ring_header = &adapter->ring_header;

        atl1_clean_tx_ring(adapter);
        atl1_clean_rx_ring(adapter);

        kfree(tpd_ring->buffer_info);
        pci_free_consistent(pdev, ring_header->size, ring_header->desc,
                ring_header->dma);

        tpd_ring->buffer_info = NULL;
        tpd_ring->desc = NULL;
        tpd_ring->dma = 0;

        rfd_ring->buffer_info = NULL;
        rfd_ring->desc = NULL;
        rfd_ring->dma = 0;

        rrd_ring->desc = NULL;
        rrd_ring->dma = 0;
}

static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
        u32 value;
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        /* Config MAC CTRL Register */
        value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
        /* duplex */
        if (FULL_DUPLEX == adapter->link_duplex)
                value |= MAC_CTRL_DUPLX;
        /* speed */
        value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
                         MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
                  MAC_CTRL_SPEED_SHIFT);
        /* flow control */
        value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
        /* PAD & CRC */
        value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
        /* preamble length */
        value |= (((u32) adapter->hw.preamble_len
                   & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
        /* vlan */
        if (adapter->vlgrp)
                value |= MAC_CTRL_RMV_VLAN;
        /* rx checksum
           if (adapter->rx_csum)
           value |= MAC_CTRL_RX_CHKSUM_EN;
         */
        /* filter mode */
        value |= MAC_CTRL_BC_EN;
        if (netdev->flags & IFF_PROMISC)
                value |= MAC_CTRL_PROMIS_EN;
        else if (netdev->flags & IFF_ALLMULTI)
                value |= MAC_CTRL_MC_ALL_EN;
        /* value |= MAC_CTRL_LOOPBACK; */
        iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}

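/*
 * atl1_check_link - reconcile PHY link state with the configuration
 * @adapter: board private structure
 *
 * Reads the (latched) PHY status, updates carrier state, and either
 * accepts the negotiated speed/duplex or forces the PHY back to the
 * configured media type; for autoneg a re-configuration timer is
 * armed instead.
 */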
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 ret_val;
        u16 speed, duplex, phy_data;
        int reconfig = 0;

        /* MII_BMSR must be read twice: the link status bit is latched */
        atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
        atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
        if (!(phy_data & BMSR_LSTATUS)) {
                /* link down */
                if (netif_carrier_ok(netdev)) {
                        /* old link state: Up */
                        dev_info(&adapter->pdev->dev, "link is down\n");
                        adapter->link_speed = SPEED_0;
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                }
                return 0;
        }

        /* Link Up */
        ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
        if (ret_val)
                return ret_val;

        switch (hw->media_type) {
        case MEDIA_TYPE_1000M_FULL:
                if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_100M_FULL:
                if (speed != SPEED_100 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_100M_HALF:
                if (speed != SPEED_100 || duplex != HALF_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_10M_FULL:
                if (speed != SPEED_10 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_10M_HALF:
                if (speed != SPEED_10 || duplex != HALF_DUPLEX)
                        reconfig = 1;
                break;
        }

        /* the negotiated result matches our configured setting */
        if (!reconfig) {
                if (adapter->link_speed != speed
                    || adapter->link_duplex != duplex) {
                        adapter->link_speed = speed;
                        adapter->link_duplex = duplex;
                        atl1_setup_mac_ctrl(adapter);
                        dev_info(&adapter->pdev->dev,
                                "%s link is up %d Mbps %s\n",
                                netdev->name, adapter->link_speed,
                                adapter->link_duplex == FULL_DUPLEX ?
                                "full duplex" : "half duplex");
                }
                if (!netif_carrier_ok(netdev)) {
                        /* Link down -> Up */
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                }
                return 0;
        }

        /* link is up with the wrong parameters: drop carrier first */
        if (netif_carrier_ok(netdev)) {
                adapter->link_speed = SPEED_0;
                netif_carrier_off(netdev);
                netif_stop_queue(netdev);
        }

        if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
            hw->media_type != MEDIA_TYPE_1000M_FULL) {
                switch (hw->media_type) {
                case MEDIA_TYPE_100M_FULL:
                        phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
                                   MII_CR_RESET;
                        break;
                case MEDIA_TYPE_100M_HALF:
                        phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
                        break;
                case MEDIA_TYPE_10M_FULL:
                        phy_data =
                            MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
                        break;
                default:
                        /* MEDIA_TYPE_10M_HALF: */
                        phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
                        break;
                }
                atl1_write_phy_reg(hw, MII_BMCR, phy_data);
                return 0;
        }

        /* auto-neg: arm the timer to re-configure the PHY */
        if (!adapter->phy_timer_pending) {
                adapter->phy_timer_pending = true;
                mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
        }

        return 0;
}

/*
 * atl1_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        int old_mtu = netdev->mtu;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

        if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
                dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
                return -EINVAL;
        }

        adapter->hw.max_frame_size = max_frame;
        adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
        adapter->rx_buffer_len = (max_frame + 7) & ~7;
        adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;

        netdev->mtu = new_mtu;
        if ((old_mtu != new_mtu) && netif_running(netdev)) {
                atl1_down(adapter);
                atl1_up(adapter);
        }

        return 0;
}

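/*
 * Program the RXF and RRD pause thresholds.  This "old" variant, used
 * for dev_rev 0x8001/0x9001/0x9002/0x9003 (see atl1_configure), derives
 * the high and low water marks from the ring sizes; set_flow_ctrl_new()
 * below reads the SRAM FIFO lengths from the hardware instead.
 */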
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
        u32 hi, lo, value;

        /* RFD Flow Control */
        value = adapter->rfd_ring.count;
        hi = value / 16;
        if (hi < 2)
                hi = 2;
        lo = value * 7 / 8;

        value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

        /* RRD Flow Control */
        value = adapter->rrd_ring.count;
        lo = value / 16;
        hi = value * 7 / 8;
        if (lo < 2)
                lo = 2;
        value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

static void set_flow_ctrl_new(struct atl1_hw *hw)
{
        u32 hi, lo, value;

        /* RXF Flow Control */
        value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
        lo = value / 16;
        if (lo < 192)
                lo = 192;
        hi = value * 7 / 8;
        if (hi < lo)
                hi = lo + 16;
        value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

        /* RRD Flow Control */
        value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
        lo = value / 8;
        hi = value * 7 / 8;
        if (lo < 2)
                lo = 2;
        if (hi < lo)
                hi = lo + 3;
        value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

/*
 * atl1_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx/Rx unit of the MAC after a reset.
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        u32 value;

        /* clear interrupt status */
        iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

        /* set MAC Address */
        value = (((u32) hw->mac_addr[2]) << 24) |
                (((u32) hw->mac_addr[3]) << 16) |
                (((u32) hw->mac_addr[4]) << 8) |
                (((u32) hw->mac_addr[5]));
        iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
        value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
        iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

        /* tx / rx ring */

        /* HI base address */
        iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
                hw->hw_addr + REG_DESC_BASE_ADDR_HI);
        /* LO base address */
        iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_RFD_ADDR_LO);
        iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_RRD_ADDR_LO);
        iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_TPD_ADDR_LO);
        iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_CMB_ADDR_LO);
        iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_SMB_ADDR_LO);

        /* element count */
        value = adapter->rrd_ring.count;
        value <<= 16;
        value += adapter->rfd_ring.count;
        iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
        iowrite32(adapter->tpd_ring.count, hw->hw_addr +
                REG_DESC_TPD_RING_SIZE);

        /* Load Ptr */
        iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

        /* config Mailbox */
        value = ((atomic_read(&adapter->tpd_ring.next_to_use)
                  & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
                ((atomic_read(&adapter->rrd_ring.next_to_clean)
                & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
                ((atomic_read(&adapter->rfd_ring.next_to_use)
                & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAILBOX);

        /* config IPG/IFG */
        value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
                 << MAC_IPG_IFG_IPGT_SHIFT) |
                (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
                << MAC_IPG_IFG_MIFG_SHIFT) |
                (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
                << MAC_IPG_IFG_IPGR1_SHIFT) |
                (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
                << MAC_IPG_IFG_IPGR2_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

        /* config Half-Duplex Control */
        value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
                (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
                << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
                MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
                (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
                (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
                << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

        /* set Interrupt Moderator Timer */
        iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
        iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

        /* set Interrupt Clear Timer */
        iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

        /* set max frame size hw will accept */
        iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

        /* jumbo size & rrd retirement timer */
        value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
                 << RXQ_JMBOSZ_TH_SHIFT) |
                (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
                << RXQ_JMBO_LKAH_SHIFT) |
                (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
                << RXQ_RRD_TIMER_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

        /* Flow Control */
        switch (hw->dev_rev) {
        case 0x8001:
        case 0x9001:
        case 0x9002:
        case 0x9003:
                set_flow_ctrl_old(adapter);
                break;
        default:
                set_flow_ctrl_new(hw);
                break;
        }

        /* config TXQ */
        value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
                 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
                (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
                << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
                (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
                << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
                TXQ_CTRL_EN;
        iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

        /* min tpd fetch gap & tx jumbo packet size threshold for task offload */
        value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
                << TX_JUMBO_TASK_TH_SHIFT) |
                (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
                << TX_TPD_MIN_IPG_SHIFT);
        iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

        /* config RXQ */
        value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
                << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
                (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
                << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
                (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
                << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
                RXQ_CTRL_EN;
        iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

        /* config DMA Engine */
        value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
                << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
                ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
                << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
                DMA_CTRL_DMAW_EN;
        value |= (u32) hw->dma_ord;
        if (atl1_rcb_128 == hw->rcb_value)
                value |= DMA_CTRL_RCB_VALUE;
        iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

        /* config CMB / SMB */
        value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
                hw->cmb_tpd : adapter->tpd_ring.count;
        value <<= 16;
        value |= hw->cmb_rrd;
        iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
        value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
        iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
        iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

        /* enable CMB / SMB */
        value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
        iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

        value = ioread32(adapter->hw.hw_addr + REG_ISR);
        if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
                value = 1;      /* config failed */
        else
                value = 0;

        /* clear all interrupt status */
        iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
        iowrite32(0, adapter->hw.hw_addr + REG_ISR);
        return value;
}

/*
 * atl1_pcie_patch - Patch for PCIE module
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
        u32 value;

        /* much vendor magic here */
        value = 0x6500;
        iowrite32(value, adapter->hw.hw_addr + 0x12FC);
        /* pcie flow control mode change */
        value = ioread32(adapter->hw.hw_addr + 0x1008);
        value |= 0x8000;
        iowrite32(value, adapter->hw.hw_addr + 0x1008);
}

/*
 * After ACPI resume on some VIA motherboards, the Interrupt Disable bit
 * (0x400) in the PCI Command register is left set, which blocks INTx
 * interrupts.  This function clears that bit again.
 * Brackett, 2006/03/15
 */
static void atl1_via_workaround(struct atl1_adapter *adapter)
{
        unsigned long value;

        value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
        if (value & PCI_COMMAND_INTX_DISABLE)
                value &= ~PCI_COMMAND_INTX_DISABLE;
        /* PCI_COMMAND is a 16-bit register, so use a 16-bit write */
        iowrite16(value, adapter->hw.hw_addr + PCI_COMMAND);
}

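/*
 * atl1_inc_smb - fold the statistics message block into the OS stats
 * @adapter: board private structure
 *
 * The hardware DMAs its counters into the SMB; accumulate them into
 * soft_stats and mirror the standard fields into net_stats.
 */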
static void atl1_inc_smb(struct atl1_adapter *adapter)
{
        struct stats_msg_block *smb = adapter->smb.smb;

        /* Fill out the OS statistics structure */
        adapter->soft_stats.rx_packets += smb->rx_ok;
        adapter->soft_stats.tx_packets += smb->tx_ok;
        adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
        adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
        adapter->soft_stats.multicast += smb->rx_mcast;
        adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
                smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

        /* Rx Errors */
        adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
                smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
                smb->rx_rrd_ov + smb->rx_align_err);
        adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
        adapter->soft_stats.rx_length_errors += smb->rx_len_err;
        adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
        adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
        adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
                smb->rx_rxf_ov);

        adapter->soft_stats.rx_pause += smb->rx_pause;
        adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
        adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

        /* Tx Errors */
        adapter->soft_stats.tx_errors += (smb->tx_late_col +
                smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
        adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
        adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
        adapter->soft_stats.tx_window_errors += smb->tx_late_col;

        adapter->soft_stats.excecol += smb->tx_abort_col;
        adapter->soft_stats.deffer += smb->tx_defer;
        adapter->soft_stats.scc += smb->tx_1_col;
        adapter->soft_stats.mcc += smb->tx_2_col;
        adapter->soft_stats.latecol += smb->tx_late_col;
        adapter->soft_stats.tx_underun += smb->tx_underrun;
        adapter->soft_stats.tx_trunc += smb->tx_trunc;
        adapter->soft_stats.tx_pause += smb->tx_pause;

        adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
        adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
        adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
        adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
        adapter->net_stats.multicast = adapter->soft_stats.multicast;
        adapter->net_stats.collisions = adapter->soft_stats.collisions;
        adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
        adapter->net_stats.rx_over_errors =
                adapter->soft_stats.rx_missed_errors;
        adapter->net_stats.rx_length_errors =
                adapter->soft_stats.rx_length_errors;
        adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
        adapter->net_stats.rx_frame_errors =
                adapter->soft_stats.rx_frame_errors;
        adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
        adapter->net_stats.rx_missed_errors =
                adapter->soft_stats.rx_missed_errors;
        adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
        adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
        adapter->net_stats.tx_aborted_errors =
                adapter->soft_stats.tx_aborted_errors;
        adapter->net_stats.tx_window_errors =
                adapter->soft_stats.tx_window_errors;
        adapter->net_stats.tx_carrier_errors =
                adapter->soft_stats.tx_carrier_errors;
}

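/*
 * atl1_update_mailbox - push the current ring indices to the hardware
 * @adapter: board private structure
 *
 * The mailbox register packs the TPD/RFD producer indices and the RRD
 * consumer index into a single 32-bit word; mb_lock serializes writers.
 */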
static void atl1_update_mailbox(struct atl1_adapter *adapter)
{
        unsigned long flags;
        u32 tpd_next_to_use;
        u32 rfd_next_to_use;
        u32 rrd_next_to_clean;
        u32 value;

        spin_lock_irqsave(&adapter->mb_lock, flags);

        tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
        rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
        rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);

        value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
                MB_RFD_PROD_INDX_SHIFT) |
                ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
                MB_RRD_CONS_INDX_SHIFT) |
                ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
                MB_TPD_PROD_INDX_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);

        spin_unlock_irqrestore(&adapter->mb_lock, flags);
}

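/*
 * atl1_clean_alloc_flag - clear the alloc flag on consumed RFD buffers
 * @adapter: board private structure
 * @rrd: return descriptor whose buffers were consumed
 * @offset: number of buffers past rrd->buf_indx to stop at
 */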
static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
        struct rx_return_desc *rrd, u16 offset)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;

        while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
                rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
                if (++rfd_ring->next_to_clean == rfd_ring->count)
                        rfd_ring->next_to_clean = 0;
        }
}

static void atl1_update_rfd_index(struct atl1_adapter *adapter,
        struct rx_return_desc *rrd)
{
        u16 num_buf;

        num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
                adapter->rx_buffer_len;
        if (rrd->num_buf == num_buf)
                /* clean alloc flag for bad rrd */
                atl1_clean_alloc_flag(adapter, rrd, num_buf);
}

static void atl1_rx_checksum(struct atl1_adapter *adapter,
        struct rx_return_desc *rrd, struct sk_buff *skb)
{
        struct pci_dev *pdev = adapter->pdev;

        skb->ip_summed = CHECKSUM_NONE;

        if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
                if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
                                        ERR_FLAG_CODE | ERR_FLAG_OV)) {
                        adapter->hw_csum_err++;
                        dev_printk(KERN_DEBUG, &pdev->dev,
                                "rx checksum error\n");
                        return;
                }
        }

        /* not IPv4 */
        if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
                /* checksum is invalid, but it's not an IPv4 pkt, so ok */
                return;

        /* IPv4 packet */
        if (likely(!(rrd->err_flg &
                (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                adapter->hw_csum_good++;
                return;
        }

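        /*
         * The RRD carries the 16-bit checksum the hardware computed.
         * When the hardware claims the checksum is bad, hand the packet
         * up as CHECKSUM_COMPLETE so the stack re-verifies the sum in
         * software instead of dropping the packet outright.
         */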
        /* IPv4, but hardware thinks its checksum is wrong */
        dev_printk(KERN_DEBUG, &pdev->dev,
                "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
                rrd->pkt_flg, rrd->err_flg);
        skb->ip_summed = CHECKSUM_COMPLETE;
        skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
        adapter->hw_csum_err++;
}

/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 *
 * Returns the number of buffers allocated.
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct pci_dev *pdev = adapter->pdev;
        struct page *page;
        unsigned long offset;
        struct atl1_buffer *buffer_info, *next_info;
        struct sk_buff *skb;
        u16 num_alloc = 0;
        u16 rfd_next_to_use, next_next;
        struct rx_free_desc *rfd_desc;

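        /*
         * Look one slot ahead: the loop below stops as soon as the
         * current or the next buffer is still marked allocated, so
         * next_to_use can never run into next_to_clean.
         */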
        next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
        if (++next_next == rfd_ring->count)
                next_next = 0;
        buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
        next_info = &rfd_ring->buffer_info[next_next];

        while (!buffer_info->alloced && !next_info->alloced) {
                if (buffer_info->skb) {
                        buffer_info->alloced = 1;
                        goto next;
                }

                rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

                skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->net_stats.rx_dropped++;
                        break;
                }

                /*
                 * Make buffer alignment 2 beyond a 16 byte boundary;
                 * this will result in a 16 byte aligned IP header after
                 * the 14 byte MAC header is removed
                 */
                skb_reserve(skb, NET_IP_ALIGN);

                buffer_info->alloced = 1;
                buffer_info->skb = skb;
                buffer_info->length = (u16) adapter->rx_buffer_len;
                page = virt_to_page(skb->data);
                offset = (unsigned long)skb->data & ~PAGE_MASK;
                buffer_info->dma = pci_map_page(pdev, page, offset,
                                                adapter->rx_buffer_len,
                                                PCI_DMA_FROMDEVICE);
                rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
                rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
                rfd_desc->coalese = 0;

next:
                rfd_next_to_use = next_next;
                if (unlikely(++next_next == rfd_ring->count))
                        next_next = 0;

                buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
                next_info = &rfd_ring->buffer_info[next_next];
                num_alloc++;
        }

        if (num_alloc) {
                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
        }
        return num_alloc;
}

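/*
 * atl1_intr_rx - process received packets
 * @adapter: board private structure
 *
 * Walks the RRD ring, hands completed packets to the stack, refills
 * the RFD ring and, if anything was consumed, updates the mailbox.
 */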
static void atl1_intr_rx(struct atl1_adapter *adapter)
{
        int i, count;
        u16 length;
        u16 rrd_next_to_clean;
        u32 value;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_buffer *buffer_info;
        struct rx_return_desc *rrd;
        struct sk_buff *skb;

        count = 0;

        rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

        while (1) {
                rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
                i = 1;
                if (likely(rrd->xsz.valid)) {   /* packet valid */
chk_rrd:
                        /* check rrd status */
                        if (likely(rrd->num_buf == 1))
                                goto rrd_ok;

                        /* rrd seems to be bad */
                        if (unlikely(i-- > 0)) {
                                /* rrd may not be DMAed completely */
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "incomplete RRD DMA transfer\n");
                                udelay(1);
                                goto chk_rrd;
                        }
                        /* bad rrd */
                        dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                "bad RRD\n");
                        /* see if the RFD index needs updating */
                        if (rrd->num_buf > 1)
                                atl1_update_rfd_index(adapter, rrd);

                        /* update rrd */
                        rrd->xsz.valid = 0;
                        if (++rrd_next_to_clean == rrd_ring->count)
                                rrd_next_to_clean = 0;
                        count++;
                        continue;
                } else {
                        /* the current rrd has not been updated by
                         * hardware yet */
                        break;
                }
rrd_ok:
                /* clean the alloc flag for the consumed buffer */
                atl1_clean_alloc_flag(adapter, rrd, 0);

                buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
                if (++rfd_ring->next_to_clean == rfd_ring->count)
                        rfd_ring->next_to_clean = 0;

                /* update rrd next to clean */
                if (++rrd_next_to_clean == rrd_ring->count)
                        rrd_next_to_clean = 0;
                count++;

                if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
                        if (!(rrd->err_flg &
                                (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
                                | ERR_FLAG_LEN))) {
                                /* packet error: no need to pass it upstream */
                                buffer_info->alloced = 0;
                                rrd->xsz.valid = 0;
                                continue;
                        }
                }

                /* Good Receive */
                pci_unmap_page(adapter->pdev, buffer_info->dma,
                               buffer_info->length, PCI_DMA_FROMDEVICE);
                skb = buffer_info->skb;
                length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

                skb_put(skb, length - ETH_FCS_LEN);

                /* Receive Checksum Offload */
                atl1_rx_checksum(adapter, rrd, skb);
                skb->protocol = eth_type_trans(skb, adapter->netdev);

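                /*
                 * Reassemble the 802.1Q TCI from the RRD's layout: the
                 * shuffle below leaves the priority in bits 15:13, CFI
                 * in bit 12 and the VLAN ID in bits 11:0.
                 */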
                if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
                        u16 vlan_tag = (rrd->vlan_tag >> 4) |
                                        ((rrd->vlan_tag & 7) << 13) |
                                        ((rrd->vlan_tag & 8) << 9);
                        vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
                } else
                        netif_rx(skb);

                /* let protocol layer free skb */
                buffer_info->skb = NULL;
                buffer_info->alloced = 0;
                rrd->xsz.valid = 0;

                adapter->netdev->last_rx = jiffies;
        }

        atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

        atl1_alloc_rx_buffers(adapter);

        /* update mailbox? */
        if (count) {
                u32 tpd_next_to_use;
                u32 rfd_next_to_use;

                spin_lock(&adapter->mb_lock);

                tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
                rfd_next_to_use =
                    atomic_read(&adapter->rfd_ring.next_to_use);
                rrd_next_to_clean =
                    atomic_read(&adapter->rrd_ring.next_to_clean);
                value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
                        MB_RFD_PROD_INDX_SHIFT) |
                        ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
                        MB_RRD_CONS_INDX_SHIFT) |
                        ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
                        MB_TPD_PROD_INDX_SHIFT);
                iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
                spin_unlock(&adapter->mb_lock);
        }
}

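/*
 * atl1_intr_tx - reclaim completed transmit buffers
 * @adapter: board private structure
 *
 * Frees buffers up to the consumer index reported in the CMB and wakes
 * the queue if it was stopped while the carrier is still up.
 */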
static void atl1_intr_tx(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_buffer *buffer_info;
        u16 sw_tpd_next_to_clean;
        u16 cmb_tpd_next_to_clean;

        sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
        cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

        while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
                struct tx_packet_desc *tpd;

                tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
                buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
                if (buffer_info->dma) {
                        pci_unmap_page(adapter->pdev, buffer_info->dma,
                                       buffer_info->length, PCI_DMA_TODEVICE);
                        buffer_info->dma = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb_irq(buffer_info->skb);
                        buffer_info->skb = NULL;
                }

                if (++sw_tpd_next_to_clean == tpd_ring->count)
                        sw_tpd_next_to_clean = 0;
        }
        atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

        if (netif_queue_stopped(adapter->netdev)
            && netif_carrier_ok(adapter->netdev))
                netif_wake_queue(adapter->netdev);
}

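/*
 * Number of free TPD slots.  One descriptor is always left unused so
 * that a completely full ring cannot be confused with an empty one
 * (both would otherwise have next_to_use == next_to_clean).
 */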
static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
        u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
        u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
        return ((next_to_clean > next_to_use) ?
                next_to_clean - next_to_use - 1 :
                tpd_ring->count + next_to_clean - next_to_use - 1);
}

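/*
 * atl1_tso - set up TCP segmentation offload for one packet
 *
 * Called with the tx spinlock held.  Returns a negative value on error
 * (the caller drops the packet), 0 when no segmentation is needed, and
 * a positive value when the TPD has been set up for TSO.
 */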
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
        struct tx_packet_desc *ptpd)
{
        /* spinlock held */
        u8 hdr_len, ip_off;
        u32 real_len;
        int err;

        if (skb_shinfo(skb)->gso_size) {
                if (skb_header_cloned(skb)) {
                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (unlikely(err))
                                return -1;
                }

                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);

                        real_len = (((unsigned char *)iph - skb->data) +
                                ntohs(iph->tot_len));
                        if (real_len < skb->len)
                                pskb_trim(skb, real_len);
                        hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
                        if (skb->len == hdr_len) {
                                iph->check = 0;
                                tcp_hdr(skb)->check =
                                        ~csum_tcpudp_magic(iph->saddr,
                                        iph->daddr, tcp_hdrlen(skb),
                                        IPPROTO_TCP, 0);
                                ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
                                        TPD_IPHL_SHIFT;
                                ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
                                        TPD_TCPHDRLEN_MASK) <<
                                        TPD_TCPHDRLEN_SHIFT;
                                ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
                                ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
                                return 1;
                        }

                        iph->check = 0;
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                        iph->daddr, 0, IPPROTO_TCP, 0);
                        ip_off = (unsigned char *)iph -
                                (unsigned char *) skb_network_header(skb);
                        if (ip_off == 8) /* 802.3-SNAP frame */
                                ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
                        else if (ip_off != 0)
                                return -2;

                        ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
                                TPD_IPHL_SHIFT;
                        ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
                                TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
                        ptpd->word3 |= (skb_shinfo(skb)->gso_size &
                                TPD_MSS_MASK) << TPD_MSS_SHIFT;
                        ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
                        return 3;
                }
        }
        return 0;
}

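/*
 * atl1_tx_csum - set up custom checksum offload for one packet
 *
 * For CHECKSUM_PARTIAL skbs the hardware inserts the checksum itself;
 * the TPD carries the offset where checksumming starts and the offset
 * at which the result must be stored.  Returns 1 if offload was set
 * up, 0 if the packet needs none, -1 on error.
 */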
1344 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
1345         struct tx_packet_desc *ptpd)
1346 {
1347         u8 css, cso;
1348
1349         if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1350                 css = (u8) (skb->csum_start - skb_headroom(skb));
1351                 cso = css + (u8) skb->csum_offset;
1352                 if (unlikely(css & 0x1)) {
1353                         /* L1 hardware requires an even number here */
1354                         dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1355                                 "payload offset not an even number\n");
1356                         return -1;
1357                 }
1358                 ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
1359                         TPD_PLOADOFFSET_SHIFT;
1360                 ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
1361                         TPD_CCSUMOFFSET_SHIFT;
1362                 ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
1363                 return 1;
1364         }
1365         return 0;
1366 }
1367
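/*
 * atl1_tx_map - DMA-map the skb and record the mappings in the TPD
 * ring's buffer_info array.  For TSO packets the protocol header gets
 * its own descriptor; large linear areas and fragments are split into
 * ATL1_MAX_TX_BUF_LEN sized chunks.  Called with adapter->lock held.
 */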
1368 static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
1369         struct tx_packet_desc *ptpd)
1370 {
1371         /* spinlock held */
1372         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1373         struct atl1_buffer *buffer_info;
1374         u16 buf_len = skb->len;
1375         struct page *page;
1376         unsigned long offset;
1377         unsigned int nr_frags;
1378         unsigned int f;
1379         int retval;
1380         u16 next_to_use;
1381         u16 data_len;
1382         u8 hdr_len;
1383
1384         buf_len -= skb->data_len;
1385         nr_frags = skb_shinfo(skb)->nr_frags;
1386         next_to_use = atomic_read(&tpd_ring->next_to_use);
1387         buffer_info = &tpd_ring->buffer_info[next_to_use];
1388         if (unlikely(buffer_info->skb))
1389                 BUG();
1390         /* the skb pointer is stored in the last TPD's buffer_info (see below) */
1391         buffer_info->skb = NULL;
1392
1393         retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
1394         if (retval) {
1395                 /* TSO */
1396                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1397                 buffer_info->length = hdr_len;
1398                 page = virt_to_page(skb->data);
1399                 offset = (unsigned long)skb->data & ~PAGE_MASK;
1400                 buffer_info->dma = pci_map_page(adapter->pdev, page,
1401                                                 offset, hdr_len,
1402                                                 PCI_DMA_TODEVICE);
1403
1404                 if (++next_to_use == tpd_ring->count)
1405                         next_to_use = 0;
1406
1407                 if (buf_len > hdr_len) {
1408                         int i, nseg;
1409
1410                         data_len = buf_len - hdr_len;
1411                         nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
1412                                 ATL1_MAX_TX_BUF_LEN;
1413                         for (i = 0; i < nseg; i++) {
1414                                 buffer_info =
1415                                     &tpd_ring->buffer_info[next_to_use];
1416                                 buffer_info->skb = NULL;
1417                                 buffer_info->length =
1418                                     (ATL1_MAX_TX_BUF_LEN >=
1419                                      data_len) ? data_len : ATL1_MAX_TX_BUF_LEN;
1420                                 data_len -= buffer_info->length;
1421                                 page = virt_to_page(skb->data +
1422                                         (hdr_len + i * ATL1_MAX_TX_BUF_LEN));
1423                                 offset = (unsigned long)(skb->data +
1424                                         (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
1425                                         ~PAGE_MASK;
1426                                 buffer_info->dma = pci_map_page(adapter->pdev,
1427                                         page, offset, buffer_info->length,
1428                                         PCI_DMA_TODEVICE);
1429                                 if (++next_to_use == tpd_ring->count)
1430                                         next_to_use = 0;
1431                         }
1432                 }
1433         } else {
1434                 /* not TSO */
1435                 buffer_info->length = buf_len;
1436                 page = virt_to_page(skb->data);
1437                 offset = (unsigned long)skb->data & ~PAGE_MASK;
1438                 buffer_info->dma = pci_map_page(adapter->pdev, page,
1439                         offset, buf_len, PCI_DMA_TODEVICE);
1440                 if (++next_to_use == tpd_ring->count)
1441                         next_to_use = 0;
1442         }
1443
1444         for (f = 0; f < nr_frags; f++) {
1445                 struct skb_frag_struct *frag;
1446                 u16 i, nseg;
1447
1448                 frag = &skb_shinfo(skb)->frags[f];
1449                 buf_len = frag->size;
1450
1451                 nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
1452                         ATL1_MAX_TX_BUF_LEN;
1453                 for (i = 0; i < nseg; i++) {
1454                         buffer_info = &tpd_ring->buffer_info[next_to_use];
1455                         if (unlikely(buffer_info->skb))
1456                                 BUG();
1457                         buffer_info->skb = NULL;
1458                         buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
1459                                 ATL1_MAX_TX_BUF_LEN : buf_len;
1460                         buf_len -= buffer_info->length;
1461                         buffer_info->dma = pci_map_page(adapter->pdev,
1462                                 frag->page,
1463                                 frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
1464                                 buffer_info->length, PCI_DMA_TODEVICE);
1465
1466                         if (++next_to_use == tpd_ring->count)
1467                                 next_to_use = 0;
1468                 }
1469         }
1470
1471         /* last tpd's buffer-info */
1472         buffer_info->skb = skb;
1473 }
1474
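/*
 * atl1_tx_queue - copy the prototype TPD into 'count' ring slots,
 * fixing up the buffer address/length and the header/EOP flags per
 * descriptor, then advance next_to_use.  Called with adapter->lock
 * held.
 */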
1475 static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
1476        struct tx_packet_desc *ptpd)
1477 {
1478         /* spinlock held */
1479         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1480         struct atl1_buffer *buffer_info;
1481         struct tx_packet_desc *tpd;
1482         u16 j;
1483         u32 val;
1484         u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);
1485
1486         for (j = 0; j < count; j++) {
1487                 buffer_info = &tpd_ring->buffer_info[next_to_use];
1488                 tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
1489                 if (tpd != ptpd)
1490                         memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
1491                 tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1492                 tpd->word2 = (cpu_to_le16(buffer_info->length) &
1493                         TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
1494
1495                 /*
1496                  * if this is the first descriptor of a TSO chain (the
1497                  * header TPD), set TPD_HDRFLAG; otherwise clear it.
1498                  */
1499                 val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
1500                         TPD_SEGMENT_EN_MASK;
1501                 if (val) {
1502                         if (!j)
1503                                 tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
1504                         else
1505                                 tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
1506                 }
1507
1508                 if (j == (count - 1))
1509                         tpd->word3 |= 1 << TPD_EOP_SHIFT;
1510
1511                 if (++next_to_use == tpd_ring->count)
1512                         next_to_use = 0;
1513         }
1514         /*
1515          * Force memory writes to complete before letting h/w
1516          * know there are new descriptors to fetch.  (Only
1517          * applicable for weak-ordered memory model archs,
1518          * such as IA-64).
1519          */
1520         wmb();
1521
1522         atomic_set(&tpd_ring->next_to_use, next_to_use);
1523 }
1524
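/*
 * atl1_xmit_frame - hard_start_xmit entry point
 * Works out how many TPDs the skb needs (one per ATL1_MAX_TX_BUF_LEN
 * chunk of linear data and of each fragment), reserves them under the
 * adapter lock, builds a prototype TPD (VLAN/TSO/checksum), then maps
 * and queues the buffers and updates the mailbox.
 */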
1525 static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1526 {
1527         struct atl1_adapter *adapter = netdev_priv(netdev);
1528         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1529         int len = skb->len;
1530         int tso;
1531         int count = 1;
1532         int ret_val;
1533         struct tx_packet_desc *ptpd;
1534         u16 frag_size;
1535         u16 vlan_tag;
1536         unsigned long flags;
1537         unsigned int nr_frags = 0;
1538         unsigned int mss = 0;
1539         unsigned int f;
1540         unsigned int proto_hdr_len;
1541
1542         len -= skb->data_len;
1543
1544         if (unlikely(skb->len <= 0)) {
1545                 dev_kfree_skb_any(skb);
1546                 return NETDEV_TX_OK;
1547         }
1548
1549         nr_frags = skb_shinfo(skb)->nr_frags;
1550         for (f = 0; f < nr_frags; f++) {
1551                 frag_size = skb_shinfo(skb)->frags[f].size;
1552                 if (frag_size)
1553                         count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
1554                                 ATL1_MAX_TX_BUF_LEN;
1555         }
1556
1557         mss = skb_shinfo(skb)->gso_size;
1558         if (mss) {
1559                 if (skb->protocol == htons(ETH_P_IP)) {
1560                         proto_hdr_len = (skb_transport_offset(skb) +
1561                                          tcp_hdrlen(skb));
1562                         if (unlikely(proto_hdr_len > len)) {
1563                                 dev_kfree_skb_any(skb);
1564                                 return NETDEV_TX_OK;
1565                         }
1566                         /* do we need additional TPDs? */
1567                         if (proto_hdr_len != len)
1568                                 count += (len - proto_hdr_len +
1569                                         ATL1_MAX_TX_BUF_LEN - 1) /
1570                                         ATL1_MAX_TX_BUF_LEN;
1571                 }
1572         }
1573
1574         if (!spin_trylock_irqsave(&adapter->lock, flags)) {
1575                 /* Can't get lock - tell upper layer to requeue */
1576                 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
1577                 return NETDEV_TX_LOCKED;
1578         }
1579
1580         if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
1581                 /* not enough descriptors */
1582                 netif_stop_queue(netdev);
1583                 spin_unlock_irqrestore(&adapter->lock, flags);
1584                 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
1585                 return NETDEV_TX_BUSY;
1586         }
1587
1588         ptpd = ATL1_TPD_DESC(tpd_ring,
1589                 (u16) atomic_read(&tpd_ring->next_to_use));
1590         memset(ptpd, 0, sizeof(struct tx_packet_desc));
1591
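        /*
         * Re-pack the 802.1Q TCI for the TPD: the standard layout
         * {PCP[15:13], CFI[12], VID[11:0]} becomes the descriptor's
         * {VID[15:4], CFI[3], PCP[2:0]}.
         */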
1592         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1593                 vlan_tag = vlan_tx_tag_get(skb);
1594                 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
1595                         ((vlan_tag >> 9) & 0x8);
1596                 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
1597                 ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) <<
1598                         TPD_VL_TAGGED_SHIFT;
1599         }
1600
1601         tso = atl1_tso(adapter, skb, ptpd);
1602         if (tso < 0) {
1603                 spin_unlock_irqrestore(&adapter->lock, flags);
1604                 dev_kfree_skb_any(skb);
1605                 return NETDEV_TX_OK;
1606         }
1607
1608         if (!tso) {
1609                 ret_val = atl1_tx_csum(adapter, skb, ptpd);
1610                 if (ret_val < 0) {
1611                         spin_unlock_irqrestore(&adapter->lock, flags);
1612                         dev_kfree_skb_any(skb);
1613                         return NETDEV_TX_OK;
1614                 }
1615         }
1616
1617         atl1_tx_map(adapter, skb, ptpd);
1618         atl1_tx_queue(adapter, count, ptpd);
1619         atl1_update_mailbox(adapter);
1620         spin_unlock_irqrestore(&adapter->lock, flags);
1621         netdev->trans_start = jiffies;
1622         return NETDEV_TX_OK;
1623 }
1624
1625 /*
1626  * atl1_intr - Interrupt Handler
1627  * @irq: interrupt number
1628  * @data: pointer to a network interface device structure
1630  */
1631 static irqreturn_t atl1_intr(int irq, void *data)
1632 {
1633         struct atl1_adapter *adapter = netdev_priv(data);
1634         u32 status;
1635         u8 update_rx;
1636         int max_ints = 10;
1637
1638         status = adapter->cmb.cmb->int_stats;
1639         if (!status)
1640                 return IRQ_NONE;
1641
1642         update_rx = 0;
1643
1644         do {
1645                 /* clear CMB interrupt status at once */
1646                 adapter->cmb.cmb->int_stats = 0;
1647
1648                 if (status & ISR_GPHY)  /* clear phy status */
1649                         atlx_clear_phy_int(adapter);
1650
1651                 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
1652                 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
1653
1654                 /* check if SMB intr */
1655                 if (status & ISR_SMB)
1656                         atl1_inc_smb(adapter);
1657
1658                 /* check if PCIE PHY Link down */
1659                 if (status & ISR_PHY_LINKDOWN) {
1660                         dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1661                                 "pcie phy link down %x\n", status);
1662                         if (netif_running(adapter->netdev)) {   /* reset MAC */
1663                                 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1664                                 schedule_work(&adapter->pcie_dma_to_rst_task);
1665                                 return IRQ_HANDLED;
1666                         }
1667                 }
1668
1669                 /* check if DMA read/write error ? */
1670                 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
1671                         dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1672                                 "pcie DMA r/w error (status = 0x%x)\n",
1673                                 status);
1674                         iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1675                         schedule_work(&adapter->pcie_dma_to_rst_task);
1676                         return IRQ_HANDLED;
1677                 }
1678
1679                 /* link event */
1680                 if (status & ISR_GPHY) {
1681                         adapter->soft_stats.tx_carrier_errors++;
1682                         atl1_check_for_link(adapter);
1683                 }
1684
1685                 /* transmit event */
1686                 if (status & ISR_CMB_TX)
1687                         atl1_intr_tx(adapter);
1688
1689                 /* rx exception */
1690                 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
1691                         ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
1692                         ISR_HOST_RRD_OV | ISR_CMB_RX))) {
1693                         if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
1694                                 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
1695                                 ISR_HOST_RRD_OV))
1696                                 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1697                                         "rx exception, ISR = 0x%x\n", status);
1698                         atl1_intr_rx(adapter);
1699                 }
1700
1701                 if (--max_ints < 0)
1702                         break;
1703
1704         } while ((status = adapter->cmb.cmb->int_stats));
1705
1706         /* re-enable Interrupt */
1707         iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
1708         return IRQ_HANDLED;
1709 }
1710
1711 /*
1712  * atl1_watchdog - Timer Call-back
1713  * @data: pointer to the adapter cast into an unsigned long
1714  */
1715 static void atl1_watchdog(unsigned long data)
1716 {
1717         struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1718
1719         /* Reset the timer */
1720         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1721 }
1722
1723 /*
1724  * atl1_phy_config - Timer Call-back
1725  * @data: pointer to the adapter cast into an unsigned long
1726  */
1727 static void atl1_phy_config(unsigned long data)
1728 {
1729         struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1730         struct atl1_hw *hw = &adapter->hw;
1731         unsigned long flags;
1732
1733         spin_lock_irqsave(&adapter->lock, flags);
1734         adapter->phy_timer_pending = false;
1735         atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1736         atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
1737         atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
1738         spin_unlock_irqrestore(&adapter->lock, flags);
1739 }
1740
1741 /*
1742  * Orphaned vendor comment left intact here:
1743  * <vendor comment>
1744  * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
1745  * will assert. We do soft reset <0x1400=1> according
1746  * with the SPEC. BUT, it seemes that PCIE or DMA
1747  * state-machine will not be reset. DMAR_TO_INT will
1748  * assert again and again.
1749  * </vendor comment>
1750  */
1751 static void atl1_tx_timeout_task(struct work_struct *work)
1752 {
1753         struct atl1_adapter *adapter =
1754                 container_of(work, struct atl1_adapter, tx_timeout_task);
1755         struct net_device *netdev = adapter->netdev;
1756
1757         netif_device_detach(netdev);
1758         atl1_down(adapter);
1759         atl1_up(adapter);
1760         netif_device_attach(netdev);
1761 }
1762
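/*
 * atl1_reset - soft-reset the MAC and re-run hardware initialization
 * Returns 0 on success, otherwise the error from the reset/init helpers.
 */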
1763 int atl1_reset(struct atl1_adapter *adapter)
1764 {
1765         int ret;
1766         ret = atl1_reset_hw(&adapter->hw);
1767         if (ret)
1768                 return ret;
1769         return atl1_init_hw(&adapter->hw);
1770 }
1771
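/*
 * atl1_up - bring the interface up after a reset: restore multicast and
 * VLAN state, refill the RX ring, configure the MAC, request the IRQ
 * (MSI when available, shared legacy otherwise) and start the watchdog.
 */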
1772 s32 atl1_up(struct atl1_adapter *adapter)
1773 {
1774         struct net_device *netdev = adapter->netdev;
1775         int err;
1776         int irq_flags = IRQF_SAMPLE_RANDOM;
1777
1778         /* hardware has been reset, we need to reload some things */
1779         atlx_set_multi(netdev);
1780         atl1_init_ring_ptrs(adapter);
1781         atlx_restore_vlan(adapter);
1782         err = atl1_alloc_rx_buffers(adapter);
1783         if (unlikely(!err))
1784                 /* zero buffers allocated: atl1_alloc_rx_buffers() returns the fill count */
1785                 return -ENOMEM;
1786
1787         if (unlikely(atl1_configure(adapter))) {
1788                 err = -EIO;
1789                 goto err_up;
1790         }
1791
1792         err = pci_enable_msi(adapter->pdev);
1793         if (err) {
1794                 dev_info(&adapter->pdev->dev,
1795                         "Unable to enable MSI: %d\n", err);
1796                 irq_flags |= IRQF_SHARED;
1797         }
1798
1799         err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
1800                         netdev->name, netdev);
1801         if (unlikely(err))
1802                 goto err_up;
1803
1804         mod_timer(&adapter->watchdog_timer, jiffies);
1805         atlx_irq_enable(adapter);
1806         atl1_check_link(adapter);
1807         return 0;
1808
1809 err_up:
1810         pci_disable_msi(adapter->pdev);
1811         /* free rx_buffers */
1812         atl1_clean_rx_ring(adapter);
1813         return err;
1814 }
1815
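/*
 * atl1_down - take the interface down: stop the timers, free the IRQ,
 * reset the MAC and drain the TX and RX rings.
 */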
1816 void atl1_down(struct atl1_adapter *adapter)
1817 {
1818         struct net_device *netdev = adapter->netdev;
1819
1820         del_timer_sync(&adapter->watchdog_timer);
1821         del_timer_sync(&adapter->phy_config_timer);
1822         adapter->phy_timer_pending = false;
1823
1824         atlx_irq_disable(adapter);
1825         free_irq(adapter->pdev->irq, netdev);
1826         pci_disable_msi(adapter->pdev);
1827         atl1_reset_hw(&adapter->hw);
1828         adapter->cmb.cmb->int_stats = 0;
1829
1830         adapter->link_speed = SPEED_0;
1831         adapter->link_duplex = -1;
1832         netif_carrier_off(netdev);
1833         netif_stop_queue(netdev);
1834
1835         atl1_clean_tx_ring(adapter);
1836         atl1_clean_rx_ring(adapter);
1837 }
1838
1839 /*
1840  * atl1_open - Called when a network interface is made active
1841  * @netdev: network interface device structure
1842  *
1843  * Returns 0 on success, negative value on failure
1844  *
1845  * The open entry point is called when a network interface is made
1846  * active by the system (IFF_UP).  At this point all resources needed
1847  * for transmit and receive operations are allocated, the interrupt
1848  * handler is registered with the OS, the watchdog timer is started,
1849  * and the stack is notified that the interface is ready.
1850  */
1851 static int atl1_open(struct net_device *netdev)
1852 {
1853         struct atl1_adapter *adapter = netdev_priv(netdev);
1854         int err;
1855
1856         /* allocate transmit descriptors */
1857         err = atl1_setup_ring_resources(adapter);
1858         if (err)
1859                 return err;
1860
1861         err = atl1_up(adapter);
1862         if (err)
1863                 goto err_up;
1864
1865         return 0;
1866
1867 err_up:
1868         atl1_reset(adapter);
1869         return err;
1870 }
1871
1872 /*
1873  * atl1_close - Disables a network interface
1874  * @netdev: network interface device structure
1875  *
1876  * Returns 0, this is not allowed to fail
1877  *
1878  * The close entry point is called when an interface is de-activated
1879  * by the OS.  The hardware is still under the drivers control, but
1880  * needs to be disabled.  A global MAC reset is issued to stop the
1881  * hardware, and all transmit and receive resources are freed.
1882  */
1883 static int atl1_close(struct net_device *netdev)
1884 {
1885         struct atl1_adapter *adapter = netdev_priv(netdev);
1886         atl1_down(adapter);
1887         atl1_free_ring_resources(adapter);
1888         return 0;
1889 }
1890
1891 #ifdef CONFIG_PM
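/*
 * atl1_suspend - PM callback: program wake-on-LAN events as configured
 * in adapter->wol, drop the PHY into power saving, and put the device
 * into D3hot.  Wake-on-link-change is suppressed while the link is
 * already up.
 */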
1892 static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
1893 {
1894         struct net_device *netdev = pci_get_drvdata(pdev);
1895         struct atl1_adapter *adapter = netdev_priv(netdev);
1896         struct atl1_hw *hw = &adapter->hw;
1897         u32 ctrl = 0;
1898         u32 wufc = adapter->wol;
1899
1900         netif_device_detach(netdev);
1901         if (netif_running(netdev))
1902                 atl1_down(adapter);
1903
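        /* BMSR's link-status bit is latched, so read it twice to get the
         * current state */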
1904         atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1905         atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1906         if (ctrl & BMSR_LSTATUS)
1907                 wufc &= ~ATLX_WUFC_LNKC;
1908
1909         /* reduce speed to 10/100M */
1910         if (wufc) {
1911                 atl1_phy_enter_power_saving(hw);
1912                 /* on resume, let the driver re-establish the link */
1913                 hw->phy_configured = false;
1914                 atl1_set_mac_addr(hw);
1915                 atlx_set_multi(netdev);
1916
1917                 ctrl = 0;
1918                 /* turn on magic packet wol */
1919                 if (wufc & ATLX_WUFC_MAG)
1920                         ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
1921
1922                 /* turn on Link change WOL */
1923                 if (wufc & ATLX_WUFC_LNKC)
1924                         ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
1925                 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
1926
1927                 /* turn on all-multi mode if wake on multicast is enabled */
1928                 ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
1929                 ctrl &= ~MAC_CTRL_DBG;
1930                 ctrl &= ~MAC_CTRL_PROMIS_EN;
1931                 if (wufc & ATLX_WUFC_MC)
1932                         ctrl |= MAC_CTRL_MC_ALL_EN;
1933                 else
1934                         ctrl &= ~MAC_CTRL_MC_ALL_EN;
1935
1936                 /* turn on broadcast mode if wake-on-broadcast is enabled */
1937                 if (wufc & ATLX_WUFC_BC)
1938                         ctrl |= MAC_CTRL_BC_EN;
1939                 else
1940                         ctrl &= ~MAC_CTRL_BC_EN;
1941
1942                 /* enable RX */
1943                 ctrl |= MAC_CTRL_RX_EN;
1944                 iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
1945                 pci_enable_wake(pdev, PCI_D3hot, 1);
1946                 pci_enable_wake(pdev, PCI_D3cold, 1);
1947         } else {
1948                 iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
1949                 pci_enable_wake(pdev, PCI_D3hot, 0);
1950                 pci_enable_wake(pdev, PCI_D3cold, 0);
1951         }
1952
1953         pci_save_state(pdev);
1954         pci_disable_device(pdev);
1955
1956         pci_set_power_state(pdev, PCI_D3hot);
1957
1958         return 0;
1959 }
1960
1961 static int atl1_resume(struct pci_dev *pdev)
1962 {
1963         struct net_device *netdev = pci_get_drvdata(pdev);
1964         struct atl1_adapter *adapter = netdev_priv(netdev);
1965         int err;
1966
1967         pci_set_power_state(pdev, PCI_D0);
1968         pci_restore_state(pdev);
1969
1970         /* FIXME: check and handle */
1971         err = pci_enable_device(pdev);
1972         pci_enable_wake(pdev, PCI_D3hot, 0);
1973         pci_enable_wake(pdev, PCI_D3cold, 0);
1974
1975         iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
1976         atl1_reset(adapter);
1977
1978         if (netif_running(netdev))
1979                 atl1_up(adapter);
1980         netif_device_attach(netdev);
1981
1982         atl1_via_workaround(adapter);
1983
1984         return 0;
1985 }
1986 #else
1987 #define atl1_suspend NULL
1988 #define atl1_resume NULL
1989 #endif
1990
1991 #ifdef CONFIG_NET_POLL_CONTROLLER
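/*
 * atl1_poll_controller - polling 'interrupt'
 * Used by things like netconsole to inject packets with normal
 * interrupts disabled; it simply runs the regular interrupt handler.
 */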
1992 static void atl1_poll_controller(struct net_device *netdev)
1993 {
1994         disable_irq(netdev->irq);
1995         atl1_intr(netdev->irq, netdev);
1996         enable_irq(netdev->irq);
1997 }
1998 #endif
1999
2000 /*
2001  * atl1_probe - Device Initialization Routine
2002  * @pdev: PCI device information struct
2003  * @ent: entry in atl1_pci_tbl
2004  *
2005  * Returns 0 on success, negative on failure
2006  *
2007  * atl1_probe initializes an adapter identified by a pci_dev structure.
2008  * The OS initialization, configuring of the adapter private structure,
2009  * and a hardware reset occur.
2010  */
2011 static int __devinit atl1_probe(struct pci_dev *pdev,
2012         const struct pci_device_id *ent)
2013 {
2014         struct net_device *netdev;
2015         struct atl1_adapter *adapter;
2016         static int cards_found;
2017         int err;
2018
2019         err = pci_enable_device(pdev);
2020         if (err)
2021                 return err;
2022
2023         /*
2024          * The atl1 chip can DMA to 64-bit addresses, but it uses a single
2025          * shared register for the high 32 bits, so only a single, aligned,
2026          * 4 GB physical address range can be used at a time.
2027          *
2028          * Supporting 64-bit DMA on this hardware is more trouble than it's
2029          * worth.  It is far easier to limit to 32-bit DMA than update
2030          * various kernel subsystems to support the mechanics required by a
2031          * fixed-high-32-bit system.
2032          */
2033         err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2034         if (err) {
2035                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2036                 goto err_dma;
2037         }
2038         /*
2039          * Mark all PCI regions associated with PCI device
2040          * pdev as being reserved by owner atl1_driver_name
2041          */
2042         err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
2043         if (err)
2044                 goto err_request_regions;
2045
2046         /*
2047          * Enables bus-mastering on the device and calls
2048          * pcibios_set_master to do the needed arch specific settings
2049          */
2050         pci_set_master(pdev);
2051
2052         netdev = alloc_etherdev(sizeof(struct atl1_adapter));
2053         if (!netdev) {
2054                 err = -ENOMEM;
2055                 goto err_alloc_etherdev;
2056         }
2057         SET_NETDEV_DEV(netdev, &pdev->dev);
2058
2059         pci_set_drvdata(pdev, netdev);
2060         adapter = netdev_priv(netdev);
2061         adapter->netdev = netdev;
2062         adapter->pdev = pdev;
2063         adapter->hw.back = adapter;
2064
2065         adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
2066         if (!adapter->hw.hw_addr) {
2067                 err = -EIO;
2068                 goto err_pci_iomap;
2069         }
2070         /* get device revision number */
2071         adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
2072                 (REG_MASTER_CTRL + 2));
2073         dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
2074
2075         /* set default ring resource counts */
2076         adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
2077         adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
2078
2079         adapter->mii.dev = netdev;
2080         adapter->mii.mdio_read = mdio_read;
2081         adapter->mii.mdio_write = mdio_write;
2082         adapter->mii.phy_id_mask = 0x1f;
2083         adapter->mii.reg_num_mask = 0x1f;
2084
2085         netdev->open = &atl1_open;
2086         netdev->stop = &atl1_close;
2087         netdev->hard_start_xmit = &atl1_xmit_frame;
2088         netdev->get_stats = &atlx_get_stats;
2089         netdev->set_multicast_list = &atlx_set_multi;
2090         netdev->set_mac_address = &atl1_set_mac;
2091         netdev->change_mtu = &atl1_change_mtu;
2092         netdev->do_ioctl = &atlx_ioctl;
2093         netdev->tx_timeout = &atlx_tx_timeout;
2094         netdev->watchdog_timeo = 5 * HZ;
2095 #ifdef CONFIG_NET_POLL_CONTROLLER
2096         netdev->poll_controller = atl1_poll_controller;
2097 #endif
2098         netdev->vlan_rx_register = atlx_vlan_rx_register;
2099
2100         netdev->ethtool_ops = &atl1_ethtool_ops;
2101         adapter->bd_number = cards_found;
2102
2103         /* setup the private structure */
2104         err = atl1_sw_init(adapter);
2105         if (err)
2106                 goto err_common;
2107
2108         netdev->features = NETIF_F_HW_CSUM;
2109         netdev->features |= NETIF_F_SG;
2110         netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2111         netdev->features |= NETIF_F_TSO;
2112         netdev->features |= NETIF_F_LLTX;
2113
2114         /*
2115          * patch for some L1 of old version,
2116          * the final version of L1 may not need these
2117          * patches
2118          */
2119         /* atl1_pcie_patch(adapter); */
2120
2121         /* really reset GPHY core */
2122         iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2123
2124         /*
2125          * reset the controller to
2126          * put the device in a known good starting state
2127          */
2128         if (atl1_reset_hw(&adapter->hw)) {
2129                 err = -EIO;
2130                 goto err_common;
2131         }
2132
2133         /* copy the MAC address out of the EEPROM */
2134         atl1_read_mac_addr(&adapter->hw);
2135         memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2136
2137         if (!is_valid_ether_addr(netdev->dev_addr)) {
2138                 err = -EIO;
2139                 goto err_common;
2140         }
2141
2142         atl1_check_options(adapter);
2143
2144         /* pre-init the MAC, and setup link */
2145         err = atl1_init_hw(&adapter->hw);
2146         if (err) {
2147                 err = -EIO;
2148                 goto err_common;
2149         }
2150
2151         atl1_pcie_patch(adapter);
2152         /* assume we have no link for now */
2153         netif_carrier_off(netdev);
2154         netif_stop_queue(netdev);
2155
2156         init_timer(&adapter->watchdog_timer);
2157         adapter->watchdog_timer.function = &atl1_watchdog;
2158         adapter->watchdog_timer.data = (unsigned long)adapter;
2159
2160         init_timer(&adapter->phy_config_timer);
2161         adapter->phy_config_timer.function = &atl1_phy_config;
2162         adapter->phy_config_timer.data = (unsigned long)adapter;
2163         adapter->phy_timer_pending = false;
2164
2165         INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
2166
2167         INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
2168
2169         INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
2170
2171         err = register_netdev(netdev);
2172         if (err)
2173                 goto err_common;
2174
2175         cards_found++;
2176         atl1_via_workaround(adapter);
2177         return 0;
2178
2179 err_common:
2180         pci_iounmap(pdev, adapter->hw.hw_addr);
2181 err_pci_iomap:
2182         free_netdev(netdev);
2183 err_alloc_etherdev:
2184         pci_release_regions(pdev);
2185 err_dma:
2186 err_request_regions:
2187         pci_disable_device(pdev);
2188         return err;
2189 }
2190
2191 /*
2192  * atl1_remove - Device Removal Routine
2193  * @pdev: PCI device information struct
2194  *
2195  * atl1_remove is called by the PCI subsystem to alert the driver
2196  * that it should release a PCI device.  This could be caused by a
2197  * Hot-Plug event, or because the driver is going to be removed from
2198  * memory.
2199  */
2200 static void __devexit atl1_remove(struct pci_dev *pdev)
2201 {
2202         struct net_device *netdev = pci_get_drvdata(pdev);
2203         struct atl1_adapter *adapter;
2204         /* Device not available. Return. */
2205         if (!netdev)
2206                 return;
2207
2208         adapter = netdev_priv(netdev);
2209
2210         /*
2211          * Some atl1 boards lack persistent storage for their MAC, and get it
2212          * from the BIOS during POST.  If we've been messing with the MAC
2213          * address, we need to save the permanent one.
2214          */
2215         if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
2216                 memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
2217                         ETH_ALEN);
2218                 atl1_set_mac_addr(&adapter->hw);
2219         }
2220
2221         iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2222         unregister_netdev(netdev);
2223         pci_iounmap(pdev, adapter->hw.hw_addr);
2224         pci_release_regions(pdev);
2225         free_netdev(netdev);
2226         pci_disable_device(pdev);
2227 }
2228
2229 static struct pci_driver atl1_driver = {
2230         .name = ATLX_DRIVER_NAME,
2231         .id_table = atl1_pci_tbl,
2232         .probe = atl1_probe,
2233         .remove = __devexit_p(atl1_remove),
2234         .suspend = atl1_suspend,
2235         .resume = atl1_resume
2236 };
2237
2238 /*
2239  * atl1_exit_module - Driver Exit Cleanup Routine
2240  *
2241  * atl1_exit_module is called just before the driver is removed
2242  * from memory.
2243  */
2244 static void __exit atl1_exit_module(void)
2245 {
2246         pci_unregister_driver(&atl1_driver);
2247 }
2248
2249 /*
2250  * atl1_init_module - Driver Registration Routine
2251  *
2252  * atl1_init_module is the first routine called when the driver is
2253  * loaded. All it does is register with the PCI subsystem.
2254  */
2255 static int __init atl1_init_module(void)
2256 {
2257         return pci_register_driver(&atl1_driver);
2258 }
2259
2260 module_init(atl1_init_module);
2261 module_exit(atl1_exit_module);
2262
2263 struct atl1_stats {
2264         char stat_string[ETH_GSTRING_LEN];
2265         int sizeof_stat;
2266         int stat_offset;
2267 };
2268
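/*
 * ATL1_STAT(m) expands to the size and byte offset of member m within
 * struct atl1_adapter, letting each table entry below locate and size
 * its counter at runtime.
 */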
2269 #define ATL1_STAT(m) \
2270         sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
2271
2272 static struct atl1_stats atl1_gstrings_stats[] = {
2273         {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
2274         {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
2275         {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
2276         {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
2277         {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
2278         {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
2279         {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
2280         {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
2281         {"multicast", ATL1_STAT(soft_stats.multicast)},
2282         {"collisions", ATL1_STAT(soft_stats.collisions)},
2283         {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
2284         {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
2285         {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
2286         {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
2287         {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
2288         {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
2289         {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
2290         {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
2291         {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
2292         {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
2293         {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
2294         {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
2295         {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
2296         {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
2297         {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
2298         {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
2299         {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
2300         {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
2301         {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
2302         {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
2303         {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
2304 };
2305
2306 static void atl1_get_ethtool_stats(struct net_device *netdev,
2307         struct ethtool_stats *stats, u64 *data)
2308 {
2309         struct atl1_adapter *adapter = netdev_priv(netdev);
2310         int i;
2311         char *p;
2312
2313         for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2314                 p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
2315                 data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
2316                         sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2317         }
2318
2319 }
2320
2321 static int atl1_get_sset_count(struct net_device *netdev, int sset)
2322 {
2323         switch (sset) {
2324         case ETH_SS_STATS:
2325                 return ARRAY_SIZE(atl1_gstrings_stats);
2326         default:
2327                 return -EOPNOTSUPP;
2328         }
2329 }
2330
2331 static int atl1_get_settings(struct net_device *netdev,
2332         struct ethtool_cmd *ecmd)
2333 {
2334         struct atl1_adapter *adapter = netdev_priv(netdev);
2335         struct atl1_hw *hw = &adapter->hw;
2336
2337         ecmd->supported = (SUPPORTED_10baseT_Half |
2338                            SUPPORTED_10baseT_Full |
2339                            SUPPORTED_100baseT_Half |
2340                            SUPPORTED_100baseT_Full |
2341                            SUPPORTED_1000baseT_Full |
2342                            SUPPORTED_Autoneg | SUPPORTED_TP);
2343         ecmd->advertising = ADVERTISED_TP;
2344         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2345             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2346                 ecmd->advertising |= ADVERTISED_Autoneg;
2347                 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
2349                         ecmd->advertising |=
2350                             (ADVERTISED_10baseT_Half |
2351                              ADVERTISED_10baseT_Full |
2352                              ADVERTISED_100baseT_Half |
2353                              ADVERTISED_100baseT_Full |
2354                              ADVERTISED_1000baseT_Full);
2355                 } else
2356                         ecmd->advertising |= (ADVERTISED_1000baseT_Full);
2357         }
2358         ecmd->port = PORT_TP;
2359         ecmd->phy_address = 0;
2360         ecmd->transceiver = XCVR_INTERNAL;
2361
2362         if (netif_carrier_ok(adapter->netdev)) {
2363                 u16 link_speed, link_duplex;
2364                 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
2365                 ecmd->speed = link_speed;
2366                 if (link_duplex == FULL_DUPLEX)
2367                         ecmd->duplex = DUPLEX_FULL;
2368                 else
2369                         ecmd->duplex = DUPLEX_HALF;
2370         } else {
2371                 ecmd->speed = -1;
2372                 ecmd->duplex = -1;
2373         }
2374         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2375             hw->media_type == MEDIA_TYPE_1000M_FULL)
2376                 ecmd->autoneg = AUTONEG_ENABLE;
2377         else
2378                 ecmd->autoneg = AUTONEG_DISABLE;
2379
2380         return 0;
2381 }
2382
2383 static int atl1_set_settings(struct net_device *netdev,
2384         struct ethtool_cmd *ecmd)
2385 {
2386         struct atl1_adapter *adapter = netdev_priv(netdev);
2387         struct atl1_hw *hw = &adapter->hw;
2388         u16 phy_data;
2389         int ret_val = 0;
2390         u16 old_media_type = hw->media_type;
2391
2392         if (netif_running(adapter->netdev)) {
2393                 dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
2394                 atl1_down(adapter);
2395         }
2396
2397         if (ecmd->autoneg == AUTONEG_ENABLE)
2398                 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
2399         else {
2400                 if (ecmd->speed == SPEED_1000) {
2401                         if (ecmd->duplex != DUPLEX_FULL) {
2402                                 dev_warn(&adapter->pdev->dev,
2403                                         "can't force to 1000M half duplex\n");
2404                                 ret_val = -EINVAL;
2405                                 goto exit_sset;
2406                         }
2407                         hw->media_type = MEDIA_TYPE_1000M_FULL;
2408                 } else if (ecmd->speed == SPEED_100) {
2409                         if (ecmd->duplex == DUPLEX_FULL)
2410                                 hw->media_type = MEDIA_TYPE_100M_FULL;
2411                         else
2412                                 hw->media_type = MEDIA_TYPE_100M_HALF;
2413                 } else {
2414                         if (ecmd->duplex == DUPLEX_FULL)
2415                                 hw->media_type = MEDIA_TYPE_10M_FULL;
2416                         else
2417                                 hw->media_type = MEDIA_TYPE_10M_HALF;
2418                 }
2419         }
2420         switch (hw->media_type) {
2421         case MEDIA_TYPE_AUTO_SENSOR:
2422                 ecmd->advertising =
2423                     ADVERTISED_10baseT_Half |
2424                     ADVERTISED_10baseT_Full |
2425                     ADVERTISED_100baseT_Half |
2426                     ADVERTISED_100baseT_Full |
2427                     ADVERTISED_1000baseT_Full |
2428                     ADVERTISED_Autoneg | ADVERTISED_TP;
2429                 break;
2430         case MEDIA_TYPE_1000M_FULL:
2431                 ecmd->advertising =
2432                     ADVERTISED_1000baseT_Full |
2433                     ADVERTISED_Autoneg | ADVERTISED_TP;
2434                 break;
2435         default:
2436                 ecmd->advertising = 0;
2437                 break;
2438         }
2439         if (atl1_phy_setup_autoneg_adv(hw)) {
2440                 ret_val = -EINVAL;
2441                 dev_warn(&adapter->pdev->dev,
2442                         "invalid ethtool speed/duplex setting\n");
2443                 goto exit_sset;
2444         }
2445         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2446             hw->media_type == MEDIA_TYPE_1000M_FULL)
2447                 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2448         else {
2449                 switch (hw->media_type) {
2450                 case MEDIA_TYPE_100M_FULL:
2451                         phy_data =
2452                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
2453                             MII_CR_RESET;
2454                         break;
2455                 case MEDIA_TYPE_100M_HALF:
2456                         phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2457                         break;
2458                 case MEDIA_TYPE_10M_FULL:
2459                         phy_data =
2460                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
2461                         break;
2462                 default:
2463                         /* MEDIA_TYPE_10M_HALF: */
2464                         phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2465                         break;
2466                 }
2467         }
2468         atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2469 exit_sset:
2470         if (ret_val)
2471                 hw->media_type = old_media_type;
2472
2473         if (netif_running(adapter->netdev)) {
2474                 dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
2475                 atl1_up(adapter);
2476         } else if (!ret_val) {
2477                 dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
2478                 atl1_reset(adapter);
2479         }
2480         return ret_val;
2481 }
2482
2483 static void atl1_get_drvinfo(struct net_device *netdev,
2484         struct ethtool_drvinfo *drvinfo)
2485 {
2486         struct atl1_adapter *adapter = netdev_priv(netdev);
2487
2488         strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
2489         strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
2490                 sizeof(drvinfo->version));
2491         strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2492         strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
2493                 sizeof(drvinfo->bus_info));
2494         drvinfo->eedump_len = ATL1_EEDUMP_LEN;
2495 }
2496
2497 static void atl1_get_wol(struct net_device *netdev,
2498         struct ethtool_wolinfo *wol)
2499 {
2500         struct atl1_adapter *adapter = netdev_priv(netdev);
2501
2502         wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
2503         wol->wolopts = 0;
2504         if (adapter->wol & ATLX_WUFC_EX)
2505                 wol->wolopts |= WAKE_UCAST;
2506         if (adapter->wol & ATLX_WUFC_MC)
2507                 wol->wolopts |= WAKE_MCAST;
2508         if (adapter->wol & ATLX_WUFC_BC)
2509                 wol->wolopts |= WAKE_BCAST;
2510         if (adapter->wol & ATLX_WUFC_MAG)
2511                 wol->wolopts |= WAKE_MAGIC;
2513 }
2514
2515 static int atl1_set_wol(struct net_device *netdev,
2516         struct ethtool_wolinfo *wol)
2517 {
2518         struct atl1_adapter *adapter = netdev_priv(netdev);
2519
2520         if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2521                 return -EOPNOTSUPP;
2522         adapter->wol = 0;
2523         if (wol->wolopts & WAKE_UCAST)
2524                 adapter->wol |= ATLX_WUFC_EX;
2525         if (wol->wolopts & WAKE_MCAST)
2526                 adapter->wol |= ATLX_WUFC_MC;
2527         if (wol->wolopts & WAKE_BCAST)
2528                 adapter->wol |= ATLX_WUFC_BC;
2529         if (wol->wolopts & WAKE_MAGIC)
2530                 adapter->wol |= ATLX_WUFC_MAG;
2531         return 0;
2532 }
2533
2534 static int atl1_get_regs_len(struct net_device *netdev)
2535 {
2536         return ATL1_REG_COUNT * sizeof(u32);
2537 }
2538
2539 static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
2540         void *p)
2541 {
2542         struct atl1_adapter *adapter = netdev_priv(netdev);
2543         struct atl1_hw *hw = &adapter->hw;
2544         unsigned int i;
2545         u32 *regbuf = p;
2546
2547         for (i = 0; i < ATL1_REG_COUNT; i++) {
2548                 /*
2549                  * This switch statement avoids reserved regions
2550                  * of register space.
2551                  */
2552                 switch (i) {
2553                 case 6 ... 9:
2554                 case 14:
2555                 case 29 ... 31:
2556                 case 34 ... 63:
2557                 case 75 ... 127:
2558                 case 136 ... 1023:
2559                 case 1027 ... 1087:
2560                 case 1091 ... 1151:
2561                 case 1194 ... 1195:
2562                 case 1200 ... 1201:
2563                 case 1206 ... 1213:
2564                 case 1216 ... 1279:
2565                 case 1290 ... 1311:
2566                 case 1323 ... 1343:
2567                 case 1358 ... 1359:
2568                 case 1368 ... 1375:
2569                 case 1378 ... 1383:
2570                 case 1388 ... 1391:
2571                 case 1393 ... 1395:
2572                 case 1402 ... 1403:
2573                 case 1410 ... 1471:
2574                 case 1522 ... 1535:
2575                         /* reserved region; don't read it */
2576                         regbuf[i] = 0;
2577                         break;
2578                 default:
2579                         /* unreserved region */
2580                         regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
2581                 }
2582         }
2583 }
2584
2585 static void atl1_get_ringparam(struct net_device *netdev,
2586         struct ethtool_ringparam *ring)
2587 {
2588         struct atl1_adapter *adapter = netdev_priv(netdev);
2589         struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
2590         struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
2591
2592         ring->rx_max_pending = ATL1_MAX_RFD;
2593         ring->tx_max_pending = ATL1_MAX_TPD;
2594         ring->rx_mini_max_pending = 0;
2595         ring->rx_jumbo_max_pending = 0;
2596         ring->rx_pending = rxdr->count;
2597         ring->tx_pending = txdr->count;
2598         ring->rx_mini_pending = 0;
2599         ring->rx_jumbo_pending = 0;
2600 }
2601
2602 static int atl1_set_ringparam(struct net_device *netdev,
2603         struct ethtool_ringparam *ring)
2604 {
2605         struct atl1_adapter *adapter = netdev_priv(netdev);
2606         struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
2607         struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
2608         struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
2609
2610         struct atl1_tpd_ring tpd_old, tpd_new;
2611         struct atl1_rfd_ring rfd_old, rfd_new;
2612         struct atl1_rrd_ring rrd_old, rrd_new;
2613         struct atl1_ring_header rhdr_old, rhdr_new;
2614         int err;
2615
2616         tpd_old = adapter->tpd_ring;
2617         rfd_old = adapter->rfd_ring;
2618         rrd_old = adapter->rrd_ring;
2619         rhdr_old = adapter->ring_header;
2620
2621         if (netif_running(adapter->netdev))
2622                 atl1_down(adapter);
2623
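        /*
         * Clamp the requested counts to [ATL1_MIN_*, ATL1_MAX_*] and
         * round up to a multiple of four descriptors, e.g. a request
         * for 101 becomes 104.
         */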
2624         rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
2625         rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
2626                         rfdr->count;
2627         rfdr->count = (rfdr->count + 3) & ~3;
2628         rrdr->count = rfdr->count;
2629
2630         tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
2631         tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
2632                         tpdr->count;
2633         tpdr->count = (tpdr->count + 3) & ~3;
2634
2635         if (netif_running(adapter->netdev)) {
2636                 /* try to get new resources before deleting old */
2637                 err = atl1_setup_ring_resources(adapter);
2638                 if (err)
2639                         goto err_setup_ring;
2640
2641                 /*
2642                  * save the new, restore the old in order to free it,
2643                  * then restore the new back again
2644                  */
2645
2646                 rfd_new = adapter->rfd_ring;
2647                 rrd_new = adapter->rrd_ring;
2648                 tpd_new = adapter->tpd_ring;
2649                 rhdr_new = adapter->ring_header;
2650                 adapter->rfd_ring = rfd_old;
2651                 adapter->rrd_ring = rrd_old;
2652                 adapter->tpd_ring = tpd_old;
2653                 adapter->ring_header = rhdr_old;
2654                 atl1_free_ring_resources(adapter);
2655                 adapter->rfd_ring = rfd_new;
2656                 adapter->rrd_ring = rrd_new;
2657                 adapter->tpd_ring = tpd_new;
2658                 adapter->ring_header = rhdr_new;
2659
2660                 err = atl1_up(adapter);
2661                 if (err)
2662                         return err;
2663         }
2664         return 0;
2665
2666 err_setup_ring:
2667         adapter->rfd_ring = rfd_old;
2668         adapter->rrd_ring = rrd_old;
2669         adapter->tpd_ring = tpd_old;
2670         adapter->ring_header = rhdr_old;
2671         atl1_up(adapter);
2672         return err;
2673 }
2674
2675 static void atl1_get_pauseparam(struct net_device *netdev,
2676         struct ethtool_pauseparam *epause)
2677 {
2678         struct atl1_adapter *adapter = netdev_priv(netdev);
2679         struct atl1_hw *hw = &adapter->hw;
2680
2681         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2682             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2683                 epause->autoneg = AUTONEG_ENABLE;
2684         } else {
2685                 epause->autoneg = AUTONEG_DISABLE;
2686         }
2687         epause->rx_pause = 1;
2688         epause->tx_pause = 1;
2689 }
2690
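/*
 * FIXME: this looks like a stub; it writes the current autoneg state
 * back into *epause instead of applying the requested pause settings
 * to the hardware.
 */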
2691 static int atl1_set_pauseparam(struct net_device *netdev,
2692         struct ethtool_pauseparam *epause)
2693 {
2694         struct atl1_adapter *adapter = netdev_priv(netdev);
2695         struct atl1_hw *hw = &adapter->hw;
2696
2697         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2698             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2699                 epause->autoneg = AUTONEG_ENABLE;
2700         } else {
2701                 epause->autoneg = AUTONEG_DISABLE;
2702         }
2703
2704         epause->rx_pause = 1;
2705         epause->tx_pause = 1;
2706
2707         return 0;
2708 }
2709
2710 /* FIXME: is this right? -- CHS */
2711 static u32 atl1_get_rx_csum(struct net_device *netdev)
2712 {
2713         return 1;
2714 }
2715
2716 static void atl1_get_strings(struct net_device *netdev, u32 stringset,
2717         u8 *data)
2718 {
2719         u8 *p = data;
2720         int i;
2721
2722         switch (stringset) {
2723         case ETH_SS_STATS:
2724                 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2725                         memcpy(p, atl1_gstrings_stats[i].stat_string,
2726                                 ETH_GSTRING_LEN);
2727                         p += ETH_GSTRING_LEN;
2728                 }
2729                 break;
2730         }
2731 }
2732
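/*
 * atl1_nway_reset - restart link negotiation
 * Takes the interface down, rewrites MII_BMCR for the configured media
 * type (autoneg or forced speed/duplex) and brings it back up.
 */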
2733 static int atl1_nway_reset(struct net_device *netdev)
2734 {
2735         struct atl1_adapter *adapter = netdev_priv(netdev);
2736         struct atl1_hw *hw = &adapter->hw;
2737
2738         if (netif_running(netdev)) {
2739                 u16 phy_data;
2740                 atl1_down(adapter);
2741
2742                 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2743                         hw->media_type == MEDIA_TYPE_1000M_FULL) {
2744                         phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2745                 } else {
2746                         switch (hw->media_type) {
2747                         case MEDIA_TYPE_100M_FULL:
2748                                 phy_data = MII_CR_FULL_DUPLEX |
2749                                         MII_CR_SPEED_100 | MII_CR_RESET;
2750                                 break;
2751                         case MEDIA_TYPE_100M_HALF:
2752                                 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2753                                 break;
2754                         case MEDIA_TYPE_10M_FULL:
2755                                 phy_data = MII_CR_FULL_DUPLEX |
2756                                         MII_CR_SPEED_10 | MII_CR_RESET;
2757                                 break;
2758                         default:
2759                                 /* MEDIA_TYPE_10M_HALF */
2760                                 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2761                         }
2762                 }
2763                 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2764                 atl1_up(adapter);
2765         }
2766         return 0;
2767 }
2768
2769 const struct ethtool_ops atl1_ethtool_ops = {
2770         .get_settings           = atl1_get_settings,
2771         .set_settings           = atl1_set_settings,
2772         .get_drvinfo            = atl1_get_drvinfo,
2773         .get_wol                = atl1_get_wol,
2774         .set_wol                = atl1_set_wol,
2775         .get_regs_len           = atl1_get_regs_len,
2776         .get_regs               = atl1_get_regs,
2777         .get_ringparam          = atl1_get_ringparam,
2778         .set_ringparam          = atl1_set_ringparam,
2779         .get_pauseparam         = atl1_get_pauseparam,
2780         .set_pauseparam         = atl1_set_pauseparam,
2781         .get_rx_csum            = atl1_get_rx_csum,
2782         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
2783         .get_link               = ethtool_op_get_link,
2784         .set_sg                 = ethtool_op_set_sg,
2785         .get_strings            = atl1_get_strings,
2786         .nway_reset             = atl1_nway_reset,
2787         .get_ethtool_stats      = atl1_get_ethtool_stats,
2788         .get_sset_count         = atl1_get_sset_count,
2789         .set_tso                = ethtool_op_set_tso,
2790 };
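/*
 * Editor's note -- for orientation, the ops above map onto common
 * ethtool invocations ("eth0" is a placeholder):
 *
 *     ethtool eth0                 -> .get_settings
 *     ethtool -s eth0 speed 100    -> .set_settings
 *     ethtool -i eth0              -> .get_drvinfo
 *     ethtool -d eth0              -> .get_regs_len / .get_regs
 *     ethtool -g eth0              -> .get_ringparam
 *     ethtool -r eth0              -> .nway_reset
 *     ethtool -S eth0              -> .get_sset_count / .get_strings /
 *                                     .get_ethtool_stats
 */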
2791
2792 /*
2793  * Reset the transmit and receive units; mask and clear all interrupts.
2794  * hw - Struct containing variables accessed by shared code
2795  * return: 0 on success, or the nonzero idle status on error
2796  */
2797 s32 atl1_reset_hw(struct atl1_hw *hw)
2798 {
2799         struct pci_dev *pdev = hw->back->pdev;
2800         u32 icr;
2801         int i;
2802
2803         /*
2804          * Clear the interrupt mask to stop the board from generating
2805          * interrupts, and clear any pending interrupt events.
2806          */
2807         /*
2808          * iowrite32(0, hw->hw_addr + REG_IMR);
2809          * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
2810          */
2811
2812         /*
2813          * Issue a soft reset to the MAC.  This resets the chip's
2814          * transmit, receive, and DMA units.  It does not affect
2815          * the current PCI configuration.  The global reset bit is self-
2816          * clearing, and should clear within a microsecond.
2817          */
2818         iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
2819         ioread32(hw->hw_addr + REG_MASTER_CTRL);
2820
2821         iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
2822         ioread16(hw->hw_addr + REG_PHY_ENABLE);
2823
2824         /* delay about 1ms */
2825         msleep(1);
2826
2827         /* wait at least 10 ms for all modules to go idle */
2828         for (i = 0; i < 10; i++) {
2829                 icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
2830                 if (!icr)
2831                         break;
2832                 /* delay 1 ms */
2833                 msleep(1);
2834                 /* FIXME: still the right way to do this? */
2835                 cpu_relax();
2836         }
2837
2838         if (icr) {
2839                 dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
2840                 return icr;
2841         }
2842
2843         return 0;
2844 }
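/*
 * Editor's note -- a minimal bring-up sketch (illustrative only):
 * reset the MAC first, then initialize it; both helpers return
 * nonzero on failure.
 *
 *     if (atl1_reset_hw(&adapter->hw))
 *             return -EIO;
 *     if (atl1_init_hw(&adapter->hw))
 *             return -EIO;
 *
 * Mapping failures to -EIO is an editorial assumption; the driver's
 * real callers handle the returned status in their own way.
 */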
2845
2846 /* EEPROM support functions
2847  *
2848  * atl1_check_eeprom_exist
2849  * returns 0 if an EEPROM is present
2850  */
2851 static int atl1_check_eeprom_exist(struct atl1_hw *hw)
2852 {
2853         u32 value;
2854         value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2855         if (value & SPI_FLASH_CTRL_EN_VPD) {
2856                 value &= ~SPI_FLASH_CTRL_EN_VPD;
2857                 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2858         }
2859
2860         value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
2861         return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2862 }
2863
2864 static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
2865 {
2866         int i;
2867         u32 control;
2868
2869         if (offset & 3)
2870                 /* address is not dword-aligned */
2871                 return false;
2872
2873         iowrite32(0, hw->hw_addr + REG_VPD_DATA);
2874         control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
2875         iowrite32(control, hw->hw_addr + REG_VPD_CAP);
2876         ioread32(hw->hw_addr + REG_VPD_CAP);
2877
2878         for (i = 0; i < 10; i++) {
2879                 msleep(2);
2880                 control = ioread32(hw->hw_addr + REG_VPD_CAP);
2881                 if (control & VPD_CAP_VPD_FLAG)
2882                         break;
2883         }
2884         if (control & VPD_CAP_VPD_FLAG) {
2885                 *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
2886                 return true;
2887         }
2888         /* timeout */
2889         return false;
2890 }
2891
2892 /*
2893  * Reads the value from a PHY register
2894  * hw - Struct containing variables accessed by shared code
2895  * reg_addr - address of the PHY register to read
2896  */
2897 s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
2898 {
2899         u32 val;
2900         int i;
2901
2902         val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
2903                 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
2904                 MDIO_CLK_SEL_SHIFT;
2905         iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
2906         ioread32(hw->hw_addr + REG_MDIO_CTRL);
2907
2908         for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2909                 udelay(2);
2910                 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
2911                 if (!(val & (MDIO_START | MDIO_BUSY)))
2912                         break;
2913         }
2914         if (!(val & (MDIO_START | MDIO_BUSY))) {
2915                 *phy_data = (u16) val;
2916                 return 0;
2917         }
2918         return ATLX_ERR_PHY;
2919 }
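/*
 * Editor's note -- a minimal sketch of reading a standard MII
 * register through the helper above; MII_BMSR and BMSR_LSTATUS come
 * from <linux/mii.h>, and "netdev" is a hypothetical net_device:
 *
 *     u16 bmsr;
 *     if (!atl1_read_phy_reg(hw, MII_BMSR, &bmsr) &&
 *         (bmsr & BMSR_LSTATUS))
 *             netif_carrier_on(netdev);
 *
 * A zero return means the MDIO transaction completed; ATLX_ERR_PHY
 * means MDIO_START/MDIO_BUSY never cleared within the timeout.
 */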
2920
2921 #define CUSTOM_SPI_CS_SETUP     2
2922 #define CUSTOM_SPI_CLK_HI       2
2923 #define CUSTOM_SPI_CLK_LO       2
2924 #define CUSTOM_SPI_CS_HOLD      2
2925 #define CUSTOM_SPI_CS_HI        3
2926
2927 static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
2928 {
2929         int i;
2930         u32 value;
2931
2932         iowrite32(0, hw->hw_addr + REG_SPI_DATA);
2933         iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
2934
2935         value = SPI_FLASH_CTRL_WAIT_READY |
2936             (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
2937                 SPI_FLASH_CTRL_CS_SETUP_SHIFT |
2938             (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
2939                 SPI_FLASH_CTRL_CLK_HI_SHIFT |
2940             (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
2941                 SPI_FLASH_CTRL_CLK_LO_SHIFT |
2942             (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
2943                 SPI_FLASH_CTRL_CS_HOLD_SHIFT |
2944             (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
2945                 SPI_FLASH_CTRL_CS_HI_SHIFT |
2946             (1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT;
2947
2948         iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2949
2950         value |= SPI_FLASH_CTRL_START;
2951         iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2952         ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2953
2954         for (i = 0; i < 10; i++) {
2955                 msleep(1);
2956                 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2957                 if (!(value & SPI_FLASH_CTRL_START))
2958                         break;
2959         }
2960
2961         if (value & SPI_FLASH_CTRL_START)
2962                 return false;
2963
2964         *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
2965
2966         return true;
2967 }
2968
2969 /*
2970  * atl1_get_permanent_address: try the EEPROM, then SPI flash, then the
2971  * BIOS-written MAC register; return 0 if a valid address was found.
2972  */
2973 static int atl1_get_permanent_address(struct atl1_hw *hw)
2974 {
2975         u32 addr[2];
2976         u32 i, control;
2977         u16 reg;
2978         u8 eth_addr[ETH_ALEN];
2979         bool key_valid;
2980
2981         if (is_valid_ether_addr(hw->perm_mac_addr))
2982                 return 0;
2983
2984         /* init */
2985         addr[0] = addr[1] = 0;
2986
2987         if (!atl1_check_eeprom_exist(hw)) {
2988                 reg = 0;
2989                 key_valid = false;
2990                 /* Read out all EEPROM content */
2991                 i = 0;
2992                 while (1) {
2993                         if (atl1_read_eeprom(hw, i + 0x100, &control)) {
2994                                 if (key_valid) {
2995                                         if (reg == REG_MAC_STA_ADDR)
2996                                                 addr[0] = control;
2997                                         else if (reg == (REG_MAC_STA_ADDR + 4))
2998                                                 addr[1] = control;
2999                                         key_valid = false;
3000                                 } else if ((control & 0xff) == 0x5A) {
3001                                         key_valid = true;
3002                                         reg = (u16) (control >> 16);
3003                                 } else
3004                                         break;
3005                         } else
3006                                 /* read error */
3007                                 break;
3008                         i += 4;
3009                 }
3010
3011                 *(u32 *) &eth_addr[2] = swab32(addr[0]);
3012                 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3013                 if (is_valid_ether_addr(eth_addr)) {
3014                         memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3015                         return 0;
3016                 }
3017                 return 1;
3018         }
3019
3020         /* next, see if the address is stored in SPI flash */
3021         addr[0] = addr[1] = 0;
3022         reg = 0;
3023         key_valid = false;
3024         i = 0;
3025         while (1) {
3026                 if (atl1_spi_read(hw, i + 0x1f000, &control)) {
3027                         if (key_valid) {
3028                                 if (reg == REG_MAC_STA_ADDR)
3029                                         addr[0] = control;
3030                                 else if (reg == (REG_MAC_STA_ADDR + 4))
3031                                         addr[1] = control;
3032                                 key_valid = false;
3033                         } else if ((control & 0xff) == 0x5A) {
3034                                 key_valid = true;
3035                                 reg = (u16) (control >> 16);
3036                         } else
3037                                 /* data end */
3038                                 break;
3039                 } else
3040                         /* read error */
3041                         break;
3042                 i += 4;
3043         }
3044
3045         *(u32 *) &eth_addr[2] = swab32(addr[0]);
3046         *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3047         if (is_valid_ether_addr(eth_addr)) {
3048                 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3049                 return 0;
3050         }
3051
3052         /*
3053          * On some motherboards, the MAC address is written by the
3054          * BIOS directly to the MAC register during POST, and is
3055          * not stored in eeprom.  If all else thus far has failed
3056          * to fetch the permanent MAC address, try reading it directly.
3057          */
3058         addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
3059         addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
3060         *(u32 *) &eth_addr[2] = swab32(addr[0]);
3061         *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3062         if (is_valid_ether_addr(eth_addr)) {
3063                 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3064                 return 0;
3065         }
3066
3067         return 1;
3068 }
3069
3070 /*
3071  * Reads the adapter's MAC address from the EEPROM
3072  * hw - Struct containing variables accessed by shared code
3073  */
3074 s32 atl1_read_mac_addr(struct atl1_hw *hw)
3075 {
3076         u16 i;
3077
3078         if (atl1_get_permanent_address(hw))
3079                 random_ether_addr(hw->perm_mac_addr);
3080
3081         for (i = 0; i < ETH_ALEN; i++)
3082                 hw->mac_addr[i] = hw->perm_mac_addr[i];
3083         return 0;
3084 }
3085
3086 /*
3087  * Hashes an address to determine its location in the multicast table
3088  * hw - Struct containing variables accessed by shared code
3089  * mc_addr - the multicast address to hash
3090  *
3091  * atl1_hash_mc_addr
3092  *  purpose
3093  *      compute the hash value for a multicast address
3094  *      hash processing:
3095  *          1. compute the 32-bit CRC of the multicast address
3096  *          2. bit-reverse the CRC (MSB becomes LSB)
3097  */
3098 u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
3099 {
3100         u32 crc32, value = 0;
3101         int i;
3102
3103         crc32 = ether_crc_le(6, mc_addr);
3104         for (i = 0; i < 32; i++)
3105                 value |= (((crc32 >> i) & 1) << (31 - i));
3106
3107         return value;
3108 }
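/*
 * Editor's note -- worked example of the bit reversal above: if
 * ether_crc_le() returned 0x00000001 (only bit 0 set), the loop sets
 * bit 31, yielding 0x80000000; a CRC of 0x80000000 likewise maps back
 * to 0x00000001.  Only the top 6 bits of the result are ultimately
 * used by atl1_hash_set() below.
 */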
3109
3110 /*
3111  * Sets the bit in the multicast table corresponding to the hash value.
3112  * hw - Struct containing variables accessed by shared code
3113  * hash_value - Multicast address hash value
3114  */
3115 void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
3116 {
3117         u32 hash_bit, hash_reg;
3118         u32 mta;
3119
3120         /*
3121          * The hash table is a register array of 2 32-bit registers,
3122          * treated as a single array of 64 bits.  We want to set
3123          * bit BitArray[hash_value], so we figure out which register
3124          * the bit is in, read it, OR in the new bit, then write
3125          * back the new value.  The register is selected by the
3126          * most significant bit of the hash value, and the bit
3127          * within that register by the next five bits.
3128          */
3129         hash_reg = (hash_value >> 31) & 0x1;
3130         hash_bit = (hash_value >> 26) & 0x1F;
3131         mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
3132         mta |= (1 << hash_bit);
3133         iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
3134 }
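/*
 * Editor's note -- worked example: hash_value 0x84000000 has its MSB
 * set, so hash_reg = 1, and bits 30:26 equal 1, so hash_bit = 1; bit 1
 * of the second hash register gets set.  A typical filter update (as
 * in the driver's multicast path) clears both registers, then for each
 * address does, in sketch form:
 *
 *     atl1_hash_set(hw, atl1_hash_mc_addr(hw, mc_addr));
 *
 * where mc_addr walks the net_device's multicast list.
 */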
3135
3136 /*
3137  * Writes a value to a PHY register
3138  * hw - Struct containing variables accessed by shared code
3139  * reg_addr - address of the PHY register to write
3140  * phy_data - data to write to the PHY
3141  */
3142 s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
3143 {
3144         int i;
3145         u32 val;
3146
3147         val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
3148             (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
3149             MDIO_SUP_PREAMBLE |
3150             MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
3151         iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
3152         ioread32(hw->hw_addr + REG_MDIO_CTRL);
3153
3154         for (i = 0; i < MDIO_WAIT_TIMES; i++) {
3155                 udelay(2);
3156                 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3157                 if (!(val & (MDIO_START | MDIO_BUSY)))
3158                         break;
3159         }
3160
3161         if (!(val & (MDIO_START | MDIO_BUSY)))
3162                 return 0;
3163
3164         return ATLX_ERR_PHY;
3165 }
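/*
 * Editor's note -- read-modify-write sketch using both PHY helpers
 * (BMCR_ANRESTART is from <linux/mii.h>; error handling elided):
 *
 *     u16 bmcr;
 *     if (!atl1_read_phy_reg(hw, MII_BMCR, &bmcr)) {
 *             bmcr |= BMCR_ANRESTART;
 *             atl1_write_phy_reg(hw, MII_BMCR, bmcr);
 *     }
 *
 * This restarts autonegotiation, much as atl1_nway_reset() does by
 * writing a full MII_BMCR value directly.
 */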
3166
3167 /*
3168  * Take the L001's PHY out of its power-saving state (chip bug)
3169  * hw - Struct containing variables accessed by shared code
3170  * At power-on, the L001's PHY is always in the power-saving
3171  * state, in which gigabit links are impossible.
3172  */
3173 static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
3174 {
3175         s32 ret;
3176         ret = atl1_write_phy_reg(hw, 29, 0x0029);
3177         if (ret)
3178                 return ret;
3179         return atl1_write_phy_reg(hw, 30, 0);
3180 }
3181
3182 /*
3183  * TODO: do something or get rid of this
3184  */
3185 s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
3186 {
3187 /*    s32 ret_val;
3188  *    u16 phy_data;
3189  */
3190
3191 /*
3192     ret_val = atl1_write_phy_reg(hw, ...);
3193     ret_val = atl1_write_phy_reg(hw, ...);
3194     ....
3195 */
3196         return 0;
3197 }
3198
3199 /*
3200  * Resets the PHY and makes the configuration take effect
3201  * hw - Struct containing variables accessed by shared code
3202  *
3203  * Sets bits 15 and 12 of the MII Control register (F001 bug workaround)
3204  */
3205 static s32 atl1_phy_reset(struct atl1_hw *hw)
3206 {
3207         struct pci_dev *pdev = hw->back->pdev;
3208         s32 ret_val;
3209         u16 phy_data;
3210
3211         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3212             hw->media_type == MEDIA_TYPE_1000M_FULL)
3213                 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3214         else {
3215                 switch (hw->media_type) {
3216                 case MEDIA_TYPE_100M_FULL:
3217                         phy_data =
3218                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
3219                             MII_CR_RESET;
3220                         break;
3221                 case MEDIA_TYPE_100M_HALF:
3222                         phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3223                         break;
3224                 case MEDIA_TYPE_10M_FULL:
3225                         phy_data =
3226                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
3227                         break;
3228                 default:
3229                         /* MEDIA_TYPE_10M_HALF: */
3230                         phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3231                         break;
3232                 }
3233         }
3234
3235         ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3236         if (ret_val) {
3237                 u32 val;
3238                 int i;
3239                 /* pcie serdes link may be down! */
3240                 dev_dbg(&pdev->dev, "pcie phy link down\n");
3241
3242                 for (i = 0; i < 25; i++) {
3243                         msleep(1);
3244                         val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3245                         if (!(val & (MDIO_START | MDIO_BUSY)))
3246                                 break;
3247                 }
3248
3249                 if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
3250                         dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
3251                         return ret_val;
3252                 }
3253         }
3254         return 0;
3255 }
3256
3257 /*
3258  * Configures PHY autoneg and flow control advertisement settings
3259  * hw - Struct containing variables accessed by shared code
3260  */
3261 s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
3262 {
3263         s32 ret_val;
3264         s16 mii_autoneg_adv_reg;
3265         s16 mii_1000t_ctrl_reg;
3266
3267         /* Load the default MII Auto-Neg Advertisement Register (Address 4) value. */
3268         mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
3269
3270         /* Load the default MII 1000Base-T Control Register (Address 9) value. */
3271         mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
3272
3273         /*
3274          * First we clear all the 10/100 Mb speed bits in the Auto-Neg
3275          * Advertisement Register (Address 4) and the 1000 Mb speed bits
3276          * in the 1000Base-T Control Register (Address 9).
3277          */
3278         mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
3279         mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
3280
3281         /*
3282          * Need to parse media_type and set up
3283          * the appropriate PHY registers.
3284          */
3285         switch (hw->media_type) {
3286         case MEDIA_TYPE_AUTO_SENSOR:
3287                 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
3288                                         MII_AR_10T_FD_CAPS |
3289                                         MII_AR_100TX_HD_CAPS |
3290                                         MII_AR_100TX_FD_CAPS);
3291                 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3292                 break;
3293
3294         case MEDIA_TYPE_1000M_FULL:
3295                 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3296                 break;
3297
3298         case MEDIA_TYPE_100M_FULL:
3299                 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
3300                 break;
3301
3302         case MEDIA_TYPE_100M_HALF:
3303                 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
3304                 break;
3305
3306         case MEDIA_TYPE_10M_FULL:
3307                 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
3308                 break;
3309
3310         default:
3311                 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
3312                 break;
3313         }
3314
3315         /* flow control is fixed on: advertise both pause capabilities */
3316         mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
3317
3318         hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
3319         hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
3320
3321         ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
3322         if (ret_val)
3323                 return ret_val;
3324
3325         ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
3326         if (ret_val)
3327                 return ret_val;
3328
3329         return 0;
3330 }
3331
3332 /*
3333  * Configures link settings.
3334  * hw - Struct containing variables accessed by shared code
3335  * Assumes the hardware has previously been reset and the
3336  * transmitter and receiver are not enabled.
3337  */
3338 static s32 atl1_setup_link(struct atl1_hw *hw)
3339 {
3340         struct pci_dev *pdev = hw->back->pdev;
3341         s32 ret_val;
3342
3343         /*
3344          * Options:
3345          *  The PHY will advertise the value(s) parsed from
3346          *  autoneg_advertised and fc.
3347          *  Whatever autoneg is set to, we do not wait for the link result.
3348          */
3349         ret_val = atl1_phy_setup_autoneg_adv(hw);
3350         if (ret_val) {
3351                 dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
3352                 return ret_val;
3353         }
3354         /* software reset; enable autonegotiation if needed */
3355         ret_val = atl1_phy_reset(hw);
3356         if (ret_val) {
3357                 dev_dbg(&pdev->dev, "error resetting phy\n");
3358                 return ret_val;
3359         }
3360         hw->phy_configured = true;
3361         return ret_val;
3362 }
3363
3364 static void atl1_init_flash_opcode(struct atl1_hw *hw)
3365 {
3366         if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
3367                 /* unknown vendor: default to Atmel */
3368                 hw->flash_vendor = 0;
3369
3370         /* Init OP table */
3371         iowrite8(flash_table[hw->flash_vendor].cmd_program,
3372                 hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
3373         iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
3374                 hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
3375         iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
3376                 hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
3377         iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
3378                 hw->hw_addr + REG_SPI_FLASH_OP_RDID);
3379         iowrite8(flash_table[hw->flash_vendor].cmd_wren,
3380                 hw->hw_addr + REG_SPI_FLASH_OP_WREN);
3381         iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
3382                 hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
3383         iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
3384                 hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
3385         iowrite8(flash_table[hw->flash_vendor].cmd_read,
3386                 hw->hw_addr + REG_SPI_FLASH_OP_READ);
3387 }
3388
3389 /*
3390  * Performs basic configuration of the adapter.
3391  * hw - Struct containing variables accessed by shared code
3392  * Assumes that the controller has previously been reset and is in a
3393  * post-reset uninitialized state.  Initializes the multicast table
3394  * and calls routines to set up the link.
3395  * Leaves the transmit and receive units disabled and uninitialized.
3396  */
3397 s32 atl1_init_hw(struct atl1_hw *hw)
3398 {
3399         u32 ret_val = 0;
3400
3401         /* Zero out the Multicast HASH table */
3402         iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
3403         /* clear the old settings from the multicast hash table */
3404         iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
3405
3406         atl1_init_flash_opcode(hw);
3407
3408         if (!hw->phy_configured) {
3409                 /* enable GPHY LinkChange interrupt */
3410                 ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
3411                 if (ret_val)
3412                         return ret_val;
3413                 /* make PHY out of power-saving state */
3414                 ret_val = atl1_phy_leave_power_saving(hw);
3415                 if (ret_val)
3416                         return ret_val;
3417                 /* Call a subroutine to configure the link */
3418                 ret_val = atl1_setup_link(hw);
3419         }
3420         return ret_val;
3421 }
3422
3423 /*
3424  * Detects the current speed and duplex settings of the hardware.
3425  * hw - Struct containing variables accessed by shared code
3426  * speed - Speed of the connection
3427  * duplex - Duplex setting of the connection
3428  */
3429 s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
3430 {
3431         struct pci_dev *pdev = hw->back->pdev;
3432         s32 ret_val;
3433         u16 phy_data;
3434
3435         /* read the PHY Specific Status Register (register 17) */
3436         ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
3437         if (ret_val)
3438                 return ret_val;
3439
3440         if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
3441                 return ATLX_ERR_PHY_RES;
3442
3443         switch (phy_data & MII_ATLX_PSSR_SPEED) {
3444         case MII_ATLX_PSSR_1000MBS:
3445                 *speed = SPEED_1000;
3446                 break;
3447         case MII_ATLX_PSSR_100MBS:
3448                 *speed = SPEED_100;
3449                 break;
3450         case MII_ATLX_PSSR_10MBS:
3451                 *speed = SPEED_10;
3452                 break;
3453         default:
3454                 dev_dbg(&pdev->dev, "error getting speed\n");
3455                 return ATLX_ERR_PHY_SPEED;
3457         }
3458         if (phy_data & MII_ATLX_PSSR_DPLX)
3459                 *duplex = FULL_DUPLEX;
3460         else
3461                 *duplex = HALF_DUPLEX;
3462
3463         return 0;
3464 }
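/*
 * Editor's note -- a minimal caller sketch; SPEED_* / FULL_DUPLEX are
 * the values assigned above, and the print is illustrative:
 *
 *     u16 speed, duplex;
 *     if (!atl1_get_speed_and_duplex(hw, &speed, &duplex))
 *             dev_info(&pdev->dev, "link is %d Mbps, %s duplex\n",
 *                      speed, duplex == FULL_DUPLEX ? "full" : "half");
 *
 * A nonzero return means the PHY has not yet resolved speed/duplex
 * (ATLX_ERR_PHY_RES) or the register read failed.
 */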
3465
3466 void atl1_set_mac_addr(struct atl1_hw *hw)
3467 {
3468         u32 value;
3469         /*
3470          * Example: for MAC address 00-0B-6A-F6-00-DC,
3471          * register 0 holds 6AF600DC and register 1 holds 000B.
3472          * low dword:
3473          */
3474         value = (((u32) hw->mac_addr[2]) << 24) |
3475             (((u32) hw->mac_addr[3]) << 16) |
3476             (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
3477         iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
3478         /* high dword */
3479         value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
3480         iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
3481 }