/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
#include <asm/types.h>
#include <asm/system.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.0";

#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif
/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
/*
 * Per-port registers.
 */
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
#define  TX_FIFO_EMPTY			0x00000400
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
#define  INT_RX				0x00000804
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
#define  INT_EXT_TX			0x00000101
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
#define RXQ_CURRENT_DESC_PTR(p)		(0x060c + ((p) << 10))
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
#define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		TX_BURST_SIZE_4_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
		TX_BURST_SIZE_4_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
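/*
 * Note (added): the SDMA engine's buffer data appears to be handled
 * big-endian by default, which would explain why the little-endian
 * default above sets the BLM_{RX,TX}_NO_SWAP bits to suppress byte
 * swapping while the big-endian default leaves swapping enabled.
 * This reading is inferred from the bit names above, not confirmed
 * against the datasheet.
 */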
/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100		(1 << 24)
#define SET_GMII_SPEED_TO_1000		(1 << 23)
#define SET_FULL_DUPLEX_MODE		(1 << 21)
#define MAX_RX_PACKET_1522BYTE		(1 << 17)
#define MAX_RX_PACKET_9700BYTE		(5 << 17)
#define MAX_RX_PACKET_MASK		(7 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII	(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL		(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED	(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL	(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX	(1 << 2)
#define FORCE_LINK_PASS			(1 << 1)
#define SERIAL_PORT_ENABLE		(1 << 0)

#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800
/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000

#define TX_IHL_SHIFT			11
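/*
 * Note (added): BUFFER_OWNED_BY_DMA implements the descriptor
 * ownership handshake used throughout this driver.  The CPU sets the
 * bit to hand a descriptor to the SDMA engine, and the engine clears
 * it when it is done with the descriptor; rxq_process() and
 * txq_reclaim() below poll the bit before touching a descriptor again.
 */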
/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Protects access to SMI_REG, which is shared between ports.
	 */
	spinlock_t phy_lock;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
};
/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};
struct rx_queue {
	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct timer_list rx_oom;
};

struct tx_queue {
	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;
};
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;			/* User Ethernet port number	*/

	struct net_device *dev;

	struct mv643xx_eth_shared_private *shared_smi;
	int phy_addr;

	spinlock_t lock;

	struct mib_counters mib_counters;
	struct work_struct tx_timeout_task;
	struct mii_if_info mii;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	struct napi_struct napi;
	struct rx_queue rxq[1];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	struct tx_queue txq[1];
#ifdef MV643XX_ETH_TX_FAST_REFILL
	int tx_clean_threshold;
#endif
};
/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}
/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[0]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);

	wrl(mp, RXQ_COMMAND(mp->port_num), 1);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1;

	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	wrl(mp, TXQ_COMMAND(mp->port_num), 1);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1;

	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void __txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(mp->dev);
}
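/*
 * Note (added): in the queue command registers, writing bit 0 starts
 * the (single) queue and writing bit 8 (mask << 8) requests a stop.
 * The disable helpers above then spin until the hardware clears the
 * low enable bit, confirming the queue has actually stopped.
 */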
/* rx ***********************************************************************/
static void txq_reclaim(struct tx_queue *txq, int force);

static void rxq_refill(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	while (rxq->rx_desc_count < rxq->rx_ring_size) {
		int skb_size;
		struct sk_buff *skb;
		int unaligned;
		int rx;

		/*
		 * Reserve 2+14 bytes for an ethernet header (the
		 * hardware automatically prepends 2 bytes of dummy
		 * data to each received packet), 4 bytes for a VLAN
		 * header, and 4 bytes for the trailing FCS -- 24
		 * bytes total.
		 */
		skb_size = mp->dev->mtu + 24;

		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
		if (skb == NULL)
			break;

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		rxq->rx_desc_count++;
		rx = rxq->rx_used_desc;
		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;

		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (rxq->rx_desc_count == 0) {
		rxq->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&rxq->rx_oom);
	}

	spin_unlock_irqrestore(&mp->lock, flags);
}

static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	rxq_refill((struct rx_queue *)data);
}
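/*
 * Example (added): with the default MTU of 1500, each receive buffer
 * is sized 1500 + 24 = 1524 bytes (2 hardware pad bytes + 14 byte
 * ethernet header + 4 byte VLAN tag + 4 byte FCS), before the extra
 * cache-line slack added for alignment in rxq_refill() above.
 */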
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		unsigned long flags;

		spin_lock_irqsave(&mp->lock, flags);

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			spin_unlock_irqrestore(&mp->lock, flags);
			break;
		}
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;

		spin_unlock_irqrestore(&mp->lock, flags);

		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
				 mp->dev->mtu + 24, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;

			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				dev_printk(KERN_ERR, &mp->dev->dev,
					   "received packet spanning "
					   "multiple descriptors\n");
			}

			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 2 - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, mp->dev);
#ifdef MV643XX_ETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}

		mp->dev->last_rx = jiffies;
	}

	rxq_refill(rxq);

	return rx;
}
#ifdef MV643XX_ETH_NAPI
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int rx;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

#ifdef MV643XX_ETH_TX_FAST_REFILL
	if (++mp->tx_clean_threshold > 5) {
		txq_reclaim(mp->txq, 0);
		mp->tx_clean_threshold = 0;
	}
#endif

	rx = rxq_process(mp->rxq, budget);

	if (rx < budget) {
		netif_rx_complete(mp->dev, napi);
		wrl(mp, INT_CAUSE(mp->port_num), 0);
		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
		wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
	}

	return rx;
}
#endif
/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static int txq_alloc_desc_index(struct tx_queue *txq)
{
	int tx_desc_curr;

	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);

	tx_desc_curr = txq->tx_curr_desc;
	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;

	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);

	return tx_desc_curr;
}
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq_alloc_desc_index(txq);
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
			txq->tx_skb[tx_index] = skb;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
			txq->tx_skb[tx_index] = NULL;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
					     this_frag->page_offset,
					     this_frag->size, DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq_alloc_desc_index(txq);
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);

		length = skb_headlen(skb);
		txq->tx_skb[tx_index] = NULL;
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		txq->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		BUG_ON(skb->protocol != htons(ETH_P_IP));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;
}
static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_queue *txq;
	unsigned long flags;

	BUG_ON(netif_queue_stopped(dev));

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	txq = mp->txq;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;
	}

	txq_submit_skb(txq, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}
/* mii management interface *************************************************/
#define SMI_BUSY		0x10000000
#define SMI_READ_VALID		0x08000000
#define SMI_OPCODE_READ		0x04000000
#define SMI_OPCODE_WRITE	0x00000000
static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
			 unsigned int reg, unsigned int *value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	/* now wait for the data to be valid */
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
		if (i == 1000) {
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}

static void smi_reg_write(struct mv643xx_eth_private *mp, unsigned int addr,
			  unsigned int reg, unsigned int value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (value & 0xffff), smi_reg);
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
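/*
 * Note (added): as used above, an SMI command word encodes the PHY
 * register number in bits 25:21, the PHY address in bits 20:16 and,
 * for writes, the 16-bit data in bits 15:0.  SMI_BUSY and
 * SMI_READ_VALID are status bits read back from the same register.
 */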
/* mib counters *************************************************************/
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}
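/*
 * Note (added): the MIB counters appear to be clear-on-read, which is
 * why mib_counters_clear() simply walks the whole 0x80-byte counter
 * window reading each register and discarding the value.
 */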
static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
}
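/*
 * Note (added): the two 64-bit octet counters are kept as a pair of
 * 32-bit hardware registers each (low word at 0x00/0x38, high word
 * at 0x04/0x3c), which is why they are accumulated in two reads with
 * the second shifted up by 32 bits.
 */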
/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	return err;
}
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_nway_restart(&mp->mii);
}
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_link_ok(&mp->mii);
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};
/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h;
	unsigned int mac_l;

	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}
static void init_mac_tables(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	for (i = 0; i < 0x10; i += 4)
		wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
}

static void set_filter_table_entry(struct mv643xx_eth_private *mp,
				   int table, unsigned char entry)
{
	unsigned int table_reg;

	/* Set "accepts frame bit" at specified table entry */
	table_reg = rdl(mp, table + (entry & 0xfc));
	table_reg |= 0x01 << (8 * (entry & 3));
	wrl(mp, table + (entry & 0xfc), table_reg);
}
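/*
 * Note (added): each filter table packs four one-byte entries per
 * 32-bit register, so (entry & 0xfc) selects the register and
 * (entry & 3) the byte lane within it; setting bit 0 of that byte
 * marks the entry as "accept frame".
 */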
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h;
	unsigned int mac_l;
	int table;

	mac_l = (addr[4] << 8) | addr[5];
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];

	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);

	table = UNICAST_TABLE(mp->port_num);
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/* +2 is for the offset of the HW addr type */
	memcpy(dev->dev_addr, addr + 2, 6);

	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);

	return 0;
}
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
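/*
 * Note (added): this computes an 8-bit CRC (polynomial 0x107) over
 * the 6-byte MAC address.  The result indexes the 256-entry "other"
 * multicast hash table used below for addresses outside the special
 * 01:00:5e:00:00:xx range.
 */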
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	struct dev_addr_list *addr;
	int i;

	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		port_config |= UNICAST_PROMISCUOUS_MODE;
	else
		port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), port_config);

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num = mp->port_num;
		u32 accept = 0x01010101;

		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		int table;

		if (addr->da_addrlen != 6)
			continue;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = SPECIAL_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, a[5]);
		} else {
			int crc = addr_crc(a);

			table = OTHER_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, crc);
		}
	}
}
/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp)
{
	struct rx_queue *rxq = mp->rxq;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti = (i + 1) % rxq->rx_ring_size;
		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	init_timer(&rxq->rx_oom);
	rxq->rx_oom.data = (unsigned long)rxq;
	rxq->rx_oom.function = rxq_refill_timer_wrapper;

	return 0;

out_free:
	if (size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}
static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	del_timer_sync(&rxq->rx_oom);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}
static int txq_init(struct mv643xx_eth_private *mp)
{
	struct tx_queue *txq = mp->txq;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		goto out;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;
	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
								GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx skb ring\n");
		goto out_free;
	}

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		int nexti = (i + 1) % txq->tx_ring_size;
		tx_desc[i].next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	return 0;

out_free:
	if (size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  txq->tx_desc_area,
				  txq->tx_desc_dma);

out:
	return -ENOMEM;
}
static void txq_reclaim(struct tx_queue *txq, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	while (txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;
		dma_addr_t addr;
		int count;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA))
			break;

		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
		txq->tx_desc_count--;

		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = txq->tx_skb[tx_index];
		txq->tx_skb[tx_index] = NULL;

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		/*
		 * Drop mp->lock while we free the skb.
		 */
		spin_unlock_irqrestore(&mp->lock, flags);

		if (cmd_sts & TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		if (skb)
			dev_kfree_skb_irq(skb);

		spin_lock_irqsave(&mp->lock, flags);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
}
static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);

	kfree(txq->tx_skb);
}
/* netdev ops and related ***************************************************/
static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr_o;
	u32 pscr_n;

	pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));

	/* clear speed, duplex and rx buffer size fields */
	pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100   |
			    SET_GMII_SPEED_TO_1000 |
			    SET_FULL_DUPLEX_MODE   |
			    MAX_RX_PACKET_MASK);

	if (speed == SPEED_1000) {
		pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE;
	} else {
		if (speed == SPEED_100)
			pscr_n |= SET_MII_SPEED_TO_100;
		pscr_n |= MAX_RX_PACKET_1522BYTE;
	}

	if (duplex == DUPLEX_FULL)
		pscr_n |= SET_FULL_DUPLEX_MODE;

	if (pscr_n != pscr_o) {
		if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
		else {
			txq_disable(mp->txq);
			pscr_o &= ~SERIAL_PORT_ENABLE;
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
			txq_enable(mp->txq);
		}
	}
}
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & (INT_RX | INT_EXT);
	if (int_cause == 0)
		return IRQ_NONE;

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
	}

	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
		if (mii_link_ok(&mp->mii)) {
			struct ethtool_cmd cmd;

			mii_ethtool_gset(&mp->mii, &cmd);
			update_pscr(mp, cmd.speed, cmd.duplex);
			txq_enable(mp->txq);
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				__txq_maybe_wake(mp->txq);
			}
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);
		}
	}

#ifdef MV643XX_ETH_NAPI
	if (int_cause & INT_RX) {
		/* mask RX interrupts and schedule the NAPI poll routine */
		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
		rdl(mp, INT_MASK(mp->port_num));

		netif_rx_schedule(dev, &mp->napi);
	}
#else
	if (int_cause & INT_RX)
		rxq_process(mp->rxq, INT_MAX);
#endif

	if (int_cause_ext & INT_EXT_TX) {
		txq_reclaim(mp->txq, 0);
		__txq_maybe_wake(mp->txq);
	}

	return IRQ_HANDLED;
}
static void phy_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	smi_reg_read(mp, mp->phy_addr, 0, &data);
	data |= 0x8000;		/* set the PHY reset bit in the control register */
	smi_reg_write(mp, mp->phy_addr, 0, data);

	do {
		udelay(1);
		smi_reg_read(mp, mp->phy_addr, 0, &data);
	} while (data & 0x8000);
}
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	struct ethtool_cmd ethtool_cmd;
	int i;

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
		DISABLE_AUTO_NEG_SPEED_GMII    |
		DISABLE_AUTO_NEG_FOR_DUPLEX    |
		DO_NOT_FORCE_LINK_FAIL	       |
		SERIAL_PORT_CONTROL_RESERVED;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	mv643xx_eth_get_settings(mp->dev, &ethtool_cmd);
	phy_reset(mp);
	mv643xx_eth_set_settings(mp->dev, &ethtool_cmd);

	/*
	 * Configure TX path and queues.
	 */
	wrl(mp, TX_BW_MTU(mp->port_num), 0);
	for (i = 0; i < 1; i++) {
		struct tx_queue *txq = mp->txq;
		int off = TXQ_CURRENT_DESC_PTR(mp->port_num);
		u32 addr;

		addr = (u32)txq->tx_desc_dma;
		addr += txq->tx_curr_desc * sizeof(struct tx_desc);
		wrl(mp, off, addr);
	}

	/*
	 * Add configured unicast address to address filter table.
	 */
	uc_addr_set(mp, mp->dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);

	/*
	 * Enable the receive queue.
	 */
	for (i = 0; i < 1; i++) {
		struct rx_queue *rxq = mp->rxq;
		int off = RXQ_CURRENT_DESC_PTR(mp->port_num);
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrl(mp, off, addr);

		rxq_enable(rxq);
	}
}
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;

	wrl(mp, SDMA_CONFIG(mp->port_num),
	    ((coal & 0x3fff) << 8) |
	    (rdl(mp, SDMA_CONFIG(mp->port_num))
	     & 0xffc000ff));
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;

	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
}
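/*
 * Example (added): both helpers convert a delay given in usec into
 * t_clk cycles divided by 64.  With the default t_clk of 133 MHz, a
 * delay of 100 usec gives coal = (133 * 100) / 64 = 207, which is
 * then clamped to the 14-bit field the hardware provides.
 */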
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	wrl(mp, INT_CAUSE(mp->port_num), 0);
	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
	rdl(mp, INT_CAUSE_EXT(mp->port_num));

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}

	init_mac_tables(mp);

	err = rxq_init(mp);
	if (err)
		goto out;
	rxq_refill(mp->rxq);

	err = txq_init(mp);
	if (err)
		goto out_free;

#ifdef MV643XX_ETH_NAPI
	napi_enable(&mp->napi);
#endif

	port_start(mp);

	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);

	wrl(mp, INT_MASK_EXT(mp->port_num),
	    INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);

	wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);

	return 0;

out_free:
	rxq_deinit(mp->rxq);
out:
	free_irq(dev->irq, dev);

	return err;
}
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	txq_disable(mp->txq);
	rxq_disable(mp->rxq);
	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
		udelay(10);

	/* Reset the Enable bit in the Configuration Register */
	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	data &= ~(SERIAL_PORT_ENABLE		|
		  DO_NOT_FORCE_LINK_FAIL	|
		  FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
}
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

#ifdef MV643XX_ETH_NAPI
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mib_counters_update(mp);

	txq_deinit(mp->txq);
	rxq_deinit(mp->rxq);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
}
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}
static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_stop_queue(mp->dev);

		port_reset(mp);
		port_start(mp);

		__txq_maybe_wake(mp->txq);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

	mv643xx_eth_irq(dev->irq, dev);

	wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
}
#endif
static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int val;

	smi_reg_read(mp, addr, reg, &val);

	return val;
}

static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	smi_reg_write(mp, addr, reg, val);
}
/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed = 0;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	spin_lock_init(&msp->phy_lock);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;

	platform_set_drvdata(pdev, msp);

	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	iounmap(msp->base);
	kfree(msp);

	return 0;
}
static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}
static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	if (pd->phy_addr == -1) {
		mp->shared_smi = NULL;
		mp->phy_addr = -1;
	} else {
		mp->shared_smi = mp->shared;
		if (pd->shared_smi != NULL)
			mp->shared_smi = platform_get_drvdata(pd->shared_smi);

		if (pd->force_phy_addr || pd->phy_addr) {
			mp->phy_addr = pd->phy_addr & 0x3f;
			phy_addr_set(mp, mp->phy_addr);
		} else {
			mp->phy_addr = phy_addr_get(mp);
		}
	}

	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->default_rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->default_tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;
}
static int phy_detect(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	unsigned int data2;

	smi_reg_read(mp, mp->phy_addr, 0, &data);
	smi_reg_write(mp, mp->phy_addr, 0, data ^ 0x1000);

	smi_reg_read(mp, mp->phy_addr, 0, &data2);
	if (((data ^ data2) & 0x1000) == 0)
		return -ENODEV;

	smi_reg_write(mp, mp->phy_addr, 0, data);

	return 0;
}
static int phy_init(struct mv643xx_eth_private *mp,
		    struct mv643xx_eth_platform_data *pd)
{
	struct ethtool_cmd cmd;
	int err;

	err = phy_detect(mp);
	if (err) {
		dev_printk(KERN_INFO, &mp->dev->dev,
			   "no PHY detected at addr %d\n", mp->phy_addr);
		return err;
	}
	phy_reset(mp);

	mp->mii.phy_id = mp->phy_addr;
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;
	mp->mii.dev = mp->dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;

	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);

	memset(&cmd, 0, sizeof(cmd));

	cmd.port = PORT_MII;
	cmd.transceiver = XCVR_INTERNAL;
	cmd.phy_address = mp->phy_addr;
	if (pd->speed == 0) {
		cmd.autoneg = AUTONEG_ENABLE;
		cmd.speed = SPEED_100;
		cmd.advertising = ADVERTISED_10baseT_Half |
				  ADVERTISED_10baseT_Full |
				  ADVERTISED_100baseT_Half |
				  ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd.advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd.autoneg = AUTONEG_DISABLE;
		cmd.speed = pd->speed;
		cmd.duplex = pd->duplex;
	}

	update_pscr(mp, cmd.speed, cmd.duplex);
	mv643xx_eth_set_settings(mp->dev, &cmd);

	return 0;
}
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	DECLARE_MAC_BUF(mac);
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->port_num = pd->port_number;

	mp->dev = dev;
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
#endif

	set_params(mp, pd);

	spin_lock_init(&mp->lock);

	mib_counters_clear(mp);
	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	err = phy_init(mp, pd);
	if (err)
		goto out;
	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->hard_start_xmit = mv643xx_eth_xmit;
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->do_ioctl = mv643xx_eth_ioctl;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
	/*
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	err = register_netdev(dev);
	if (err)
		goto out;

	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
		   mp->port_num, print_mac(mac, dev->dev_addr));

	if (dev->features & NETIF_F_SG)
		dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");

	if (dev->features & NETIF_F_IP_CSUM)
		dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");

#ifdef MV643XX_ETH_NAPI
	dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
#endif

	if (mp->tx_desc_sram_size > 0)
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	flush_scheduled_work();
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(mp->port_num), 0);
	rdl(mp, INT_MASK(mp->port_num));

	if (netif_running(mp->dev))
		port_reset(mp);
}
static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani "
	      "and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);