2 * SuperH Ethernet device driver
4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
5 * Copyright (C) 2008 Renesas Solutions Corp.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
23 #include <linux/version.h>
24 #include <linux/init.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/etherdevice.h>
27 #include <linux/delay.h>
28 #include <linux/platform_device.h>
29 #include <linux/mdio-bitbang.h>
30 #include <linux/netdevice.h>
31 #include <linux/phy.h>
32 #include <linux/cache.h>
38 * Program the hardware MAC address from dev->dev_addr.
40 static void update_mac_address(struct net_device *ndev)
42 u32 ioaddr = ndev->base_addr;
44 ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
45 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
47 ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
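/*
 * Editorial note: the MAC address is split across two registers. MAHR takes
 * the upper four bytes (dev_addr[0] is the most significant byte) and MALR
 * the remaining two, so e.g. 00:11:22:33:44:55 is written as
 * MAHR = 0x00112233, MALR = 0x00004455.
 */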
52 * Get MAC address from SuperH MAC address register
54 * The SuperH Ethernet controller has no ROM for the MAC address.
55 * This driver reads the MAC address that was set by the bootloader (U-boot or sh-ipl+g).
56 * To use this device, you must set the MAC address in the bootloader first.
59 static void read_mac_address(struct net_device *ndev)
61 u32 ioaddr = ndev->base_addr;
63 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
64 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
65 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
66 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
67 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
68 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
72 struct mdiobb_ctrl ctrl;
81 static void bb_set(u32 addr, u32 msk)
83 ctrl_outl(ctrl_inl(addr) | msk, addr);
87 static void bb_clr(u32 addr, u32 msk)
89 ctrl_outl((ctrl_inl(addr) & ~msk), addr);
93 static int bb_read(u32 addr, u32 msk)
95 return (ctrl_inl(addr) & msk) != 0;
98 /* MDIO data direction (MMD) pin control */
99 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
101 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
103 bb_set(bitbang->addr, bitbang->mmd_msk);
105 bb_clr(bitbang->addr, bitbang->mmd_msk);
109 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
111 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
114 bb_set(bitbang->addr, bitbang->mdo_msk);
116 bb_clr(bitbang->addr, bitbang->mdo_msk);
120 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
122 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
123 return bb_read(bitbang->addr, bitbang->mdi_msk);
126 /* MDC pin control */
127 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
129 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
132 bb_set(bitbang->addr, bitbang->mdc_msk);
134 bb_clr(bitbang->addr, bitbang->mdc_msk);
137 /* mdio bus control struct */
138 static struct mdiobb_ops bb_ops = {
139 .owner = THIS_MODULE,
140 .set_mdc = sh_mdc_ctrl,
141 .set_mdio_dir = sh_mmd_ctrl,
142 .set_mdio_data = sh_set_mdio,
143 .get_mdio_data = sh_get_mdio,
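/*
 * Editorial note: these callbacks plug into the generic mdio-bitbang helper,
 * which toggles them to clock MDIO management frames through the PIR
 * pin-control register one bit at a time; the addr and the bit masks used by
 * bb_set()/bb_clr()/bb_read() are filled in later in sh_mdio_init().
 */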
147 static void sh_eth_reset(struct net_device *ndev)
149 u32 ioaddr = ndev->base_addr;
151 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
154 ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
155 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
157 if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
163 printk(KERN_ERR "Device reset fail\n");
166 ctrl_outl(0x0, ioaddr + TDLAR);
167 ctrl_outl(0x0, ioaddr + TDFAR);
168 ctrl_outl(0x0, ioaddr + TDFXR);
169 ctrl_outl(0x0, ioaddr + TDFFR);
170 ctrl_outl(0x0, ioaddr + RDLAR);
171 ctrl_outl(0x0, ioaddr + RDFAR);
172 ctrl_outl(0x0, ioaddr + RDFXR);
173 ctrl_outl(0x0, ioaddr + RDFFR);
175 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
177 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
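/*
 * Editorial note: the SH7763 path above does a full E-DMAC software reset
 * (write EDSR_ENALL, set EDMR_SRST, wait for the two reset bits in EDMR to
 * clear, report failure otherwise) and then clears the Tx/Rx descriptor
 * pointer registers; other parts only need EDMR_SRST set and cleared again.
 */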
181 /* free skb and descriptor buffer */
182 static void sh_eth_ring_free(struct net_device *ndev)
184 struct sh_eth_private *mdp = netdev_priv(ndev);
187 /* Free Rx skb ringbuffer */
188 if (mdp->rx_skbuff) {
189 for (i = 0; i < RX_RING_SIZE; i++) {
190 if (mdp->rx_skbuff[i])
191 dev_kfree_skb(mdp->rx_skbuff[i]);
194 kfree(mdp->rx_skbuff);
196 /* Free Tx skb ringbuffer */
197 if (mdp->tx_skbuff) {
198 for (i = 0; i < TX_RING_SIZE; i++) {
199 if (mdp->tx_skbuff[i])
200 dev_kfree_skb(mdp->tx_skbuff[i]);
203 kfree(mdp->tx_skbuff);
206 /* format skb and descriptor buffer */
207 static void sh_eth_ring_format(struct net_device *ndev)
209 u32 ioaddr = ndev->base_addr, reserve = 0;
210 struct sh_eth_private *mdp = netdev_priv(ndev);
213 struct sh_eth_rxdesc *rxdesc = NULL;
214 struct sh_eth_txdesc *txdesc = NULL;
215 int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
216 int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
218 mdp->cur_rx = mdp->cur_tx = 0;
219 mdp->dirty_rx = mdp->dirty_tx = 0;
221 memset(mdp->rx_ring, 0, rx_ringsize);
223 /* build Rx ring buffer */
224 for (i = 0; i < RX_RING_SIZE; i++) {
226 mdp->rx_skbuff[i] = NULL;
227 skb = dev_alloc_skb(mdp->rx_buf_sz);
228 mdp->rx_skbuff[i] = skb;
231 skb->dev = ndev; /* Mark as being used by this device. */
232 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
233 reserve = SH7763_SKB_ALIGN
234 - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
236 skb_reserve(skb, reserve);
238 skb_reserve(skb, RX_OFFSET);
241 rxdesc = &mdp->rx_ring[i];
242 rxdesc->addr = (u32)skb->data & ~0x3UL;
243 rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
245 /* The buffer size is rounded up to a 16-byte boundary. */
246 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
247 /* Rx descriptor address set */
249 ctrl_outl((u32)rxdesc, ioaddr + RDLAR);
250 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
251 ctrl_outl((u32)rxdesc, ioaddr + RDFAR);
256 /* Rx descriptor address set */
257 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
258 ctrl_outl((u32)rxdesc, ioaddr + RDFXR);
259 ctrl_outl(0x1, ioaddr + RDFFR);
262 mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
264 /* Mark the last entry as wrapping the ring. */
265 rxdesc->status |= cpu_to_le32(RD_RDEL);
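/*
 * Editorial note: at this point every Rx descriptor owns a freshly allocated
 * skb; the (aligned) skb->data pointer is written to rxdesc->addr, the
 * RD_RACT | RD_RFP bits hand the descriptor to the E-DMAC, and RD_RDEL marks
 * the final entry so the controller wraps back to the head of the ring.
 */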
267 memset(mdp->tx_ring, 0, tx_ringsize);
269 /* build Tx ring buffer */
270 for (i = 0; i < TX_RING_SIZE; i++) {
271 mdp->tx_skbuff[i] = NULL;
272 txdesc = &mdp->tx_ring[i];
273 txdesc->status = cpu_to_le32(TD_TFP);
274 txdesc->buffer_length = 0;
276 /* Tx descriptor address set */
277 ctrl_outl((u32)txdesc, ioaddr + TDLAR);
278 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
279 ctrl_outl((u32)txdesc, ioaddr + TDFAR);
284 /* Tx descriptor address set */
285 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
286 ctrl_outl((u32)txdesc, ioaddr + TDFXR);
287 ctrl_outl(0x1, ioaddr + TDFFR);
290 txdesc->status |= cpu_to_le32(TD_TDLE);
293 /* Get skb and descriptor buffer */
294 static int sh_eth_ring_init(struct net_device *ndev)
296 struct sh_eth_private *mdp = netdev_priv(ndev);
297 int rx_ringsize, tx_ringsize, ret = 0;
300 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
301 * card needs room to do 8 byte alignment, +2 so we can reserve
302 * the first 2 bytes, and +16 gets room for the status word from the
305 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
306 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
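/*
 * Editorial note, worked example: with the default MTU of 1500 this gives
 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 2 + 16 = 1546 bytes per receive
 * buffer; MTUs of 1492 or less simply use PKT_BUF_SZ.
 */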
308 /* Allocate RX and TX skb rings */
309 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
311 if (!mdp->rx_skbuff) {
312 printk(KERN_ERR "%s: Cannot allocate Rx skb\n", ndev->name);
317 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
319 if (!mdp->tx_skbuff) {
320 printk(KERN_ERR "%s: Cannot allocate Tx skb\n", ndev->name);
325 /* Allocate all Rx descriptors. */
326 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
327 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
331 printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
332 ndev->name, rx_ringsize);
339 /* Allocate all Tx descriptors. */
340 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
341 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
344 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
345 ndev->name, tx_ringsize);
352 /* free DMA buffer */
353 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
356 /* Free Rx and Tx skb ring buffer */
357 sh_eth_ring_free(ndev);
362 static int sh_eth_dev_init(struct net_device *ndev)
365 struct sh_eth_private *mdp = netdev_priv(ndev);
366 u32 ioaddr = ndev->base_addr;
367 u_int32_t rx_int_var, tx_int_var;
373 /* Descriptor format */
374 sh_eth_ring_format(ndev);
375 ctrl_outl(RPADIR_INIT, ioaddr + RPADIR);
377 /* Mask all sh_eth interrupts */
378 ctrl_outl(0, ioaddr + EESIPR);
380 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
381 ctrl_outl(EDMR_EL, ioaddr + EDMR);
383 ctrl_outl(0, ioaddr + EDMR); /* Endian change */
387 ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
388 ctrl_outl(0, ioaddr + TFTR);
390 /* Frame recv control */
391 ctrl_outl(0, ioaddr + RMCR);
393 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
394 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
395 ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
397 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
398 /* Burst cycle set */
399 ctrl_outl(0x800, ioaddr + BCULR);
402 ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);
404 #if !defined(CONFIG_CPU_SUBTYPE_SH7763)
405 ctrl_outl(0, ioaddr + TRIMD);
408 /* Recv frame limit set register */
409 ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
411 ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
412 ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);
414 /* PAUSE Prohibition */
415 val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
416 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
418 ctrl_outl(val, ioaddr + ECMR);
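/*
 * Editorial note: the ECMR value above preserves the current duplex-mode bit,
 * sets ECMR_DM again when mdp->duplex indicates full duplex, enables
 * zero-pause-frame handling (ECMR_ZPF) and turns on the transmitter and
 * receiver (ECMR_TE | ECMR_RE).
 */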
420 /* E-MAC Status Register clear */
421 ctrl_outl(ECSR_INIT, ioaddr + ECSR);
423 /* E-MAC Interrupt Enable register */
424 ctrl_outl(ECSIPR_INIT, ioaddr + ECSIPR);
426 /* Set MAC address */
427 update_mac_address(ndev);
430 #if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7763)
431 ctrl_outl(APR_AP, ioaddr + APR);
432 ctrl_outl(MPR_MP, ioaddr + MPR);
433 ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
435 #if defined(CONFIG_CPU_SUBTYPE_SH7710)
436 ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
439 /* Setting the Rx mode will start the Rx process. */
440 ctrl_outl(EDRRR_R, ioaddr + EDRRR);
442 netif_start_queue(ndev);
447 /* free Tx skb function */
448 static int sh_eth_txfree(struct net_device *ndev)
450 struct sh_eth_private *mdp = netdev_priv(ndev);
451 struct sh_eth_txdesc *txdesc;
455 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
456 entry = mdp->dirty_tx % TX_RING_SIZE;
457 txdesc = &mdp->tx_ring[entry];
458 if (txdesc->status & cpu_to_le32(TD_TACT))
460 /* Free the original skb. */
461 if (mdp->tx_skbuff[entry]) {
462 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
463 mdp->tx_skbuff[entry] = NULL;
466 txdesc->status = cpu_to_le32(TD_TFP);
467 if (entry >= TX_RING_SIZE - 1)
468 txdesc->status |= cpu_to_le32(TD_TDLE);
470 mdp->stats.tx_packets++;
471 mdp->stats.tx_bytes += txdesc->buffer_length;
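/*
 * Editorial note: Tx reclaim walks from dirty_tx towards cur_tx and stops at
 * the first descriptor the E-DMAC still owns (TD_TACT set); everything before
 * that has been sent, so its skb is freed and the descriptor is reset to
 * TD_TFP (plus TD_TDLE on the last ring entry) for reuse.
 */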
476 /* Packet receive function */
477 static int sh_eth_rx(struct net_device *ndev)
479 struct sh_eth_private *mdp = netdev_priv(ndev);
480 struct sh_eth_rxdesc *rxdesc;
482 int entry = mdp->cur_rx % RX_RING_SIZE;
483 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
486 u32 desc_status, reserve = 0;
488 rxdesc = &mdp->rx_ring[entry];
489 while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
490 desc_status = le32_to_cpu(rxdesc->status);
491 pkt_len = rxdesc->frame_length;
496 if (!(desc_status & RDFEND))
497 mdp->stats.rx_length_errors++;
499 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
500 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
501 mdp->stats.rx_errors++;
502 if (desc_status & RD_RFS1)
503 mdp->stats.rx_crc_errors++;
504 if (desc_status & RD_RFS2)
505 mdp->stats.rx_frame_errors++;
506 if (desc_status & RD_RFS3)
507 mdp->stats.rx_length_errors++;
508 if (desc_status & RD_RFS4)
509 mdp->stats.rx_length_errors++;
510 if (desc_status & RD_RFS6)
511 mdp->stats.rx_missed_errors++;
512 if (desc_status & RD_RFS10)
513 mdp->stats.rx_over_errors++;
515 swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2);
516 skb = mdp->rx_skbuff[entry];
517 mdp->rx_skbuff[entry] = NULL;
518 skb_put(skb, pkt_len);
519 skb->protocol = eth_type_trans(skb, ndev);
521 ndev->last_rx = jiffies;
522 mdp->stats.rx_packets++;
523 mdp->stats.rx_bytes += pkt_len;
525 rxdesc->status |= cpu_to_le32(RD_RACT);
526 entry = (++mdp->cur_rx) % RX_RING_SIZE;
529 /* Refill the Rx ring buffers. */
530 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
531 entry = mdp->dirty_rx % RX_RING_SIZE;
532 rxdesc = &mdp->rx_ring[entry];
533 /* The size of the buffer is 16 byte boundary. */
534 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
536 if (mdp->rx_skbuff[entry] == NULL) {
537 skb = dev_alloc_skb(mdp->rx_buf_sz);
538 mdp->rx_skbuff[entry] = skb;
540 break; /* Better luck next round. */
542 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
543 reserve = SH7763_SKB_ALIGN
544 - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
546 skb_reserve(skb, reserve);
548 skb_reserve(skb, RX_OFFSET);
550 skb->ip_summed = CHECKSUM_NONE;
551 rxdesc->addr = (u32)skb->data & ~0x3UL;
553 if (entry >= RX_RING_SIZE - 1)
555 cpu_to_le32(RD_RACT | RD_RFP | RD_RDEL);
558 cpu_to_le32(RD_RACT | RD_RFP);
561 /* Restart Rx engine if stopped. */
562 /* If we don't need to check status, don't. -KDU */
563 if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
564 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
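/*
 * Editorial note: sh_eth_rx() works in two passes. It first harvests
 * completed descriptors (RD_RACT cleared), accounting errors from the RD_RFS
 * status bits, then refills the ring with new skbs and rewrites EDRRR in case
 * the receiver had stopped on a descriptor-empty condition.
 */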
569 /* error control function */
570 static void sh_eth_error(struct net_device *ndev, int intr_status)
572 struct sh_eth_private *mdp = netdev_priv(ndev);
573 u32 ioaddr = ndev->base_addr;
576 if (intr_status & EESR_ECI) {
577 felic_stat = ctrl_inl(ioaddr + ECSR);
578 ctrl_outl(felic_stat, ioaddr + ECSR); /* clear int */
579 if (felic_stat & ECSR_ICD)
580 mdp->stats.tx_carrier_errors++;
581 if (felic_stat & ECSR_LCHNG) {
583 u32 link_stat = (ctrl_inl(ioaddr + PSR));
584 if (!(link_stat & PHY_ST_LINK)) {
585 /* Link Down : disable tx and rx */
586 ctrl_outl(ctrl_inl(ioaddr + ECMR) &
587 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
590 ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
591 ~DMAC_M_ECI, ioaddr + EESIPR);
593 ctrl_outl(ctrl_inl(ioaddr + ECSR),
595 ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
596 DMAC_M_ECI, ioaddr + EESIPR);
597 /* enable tx and rx */
598 ctrl_outl(ctrl_inl(ioaddr + ECMR) |
599 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
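/*
 * Editorial note on link-change (ECSR_LCHNG) handling: PSR is read for the
 * current link state (PHY_ST_LINK). On link down the receiver and transmitter
 * are disabled and the ECI source is masked so the interrupt does not fire
 * repeatedly; on link up ECSR is cleared, ECI is unmasked and RE/TE are
 * re-enabled.
 */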
604 if (intr_status & EESR_TWB) {
605 /* Write-back end. Unused write-back interrupt */
606 if (intr_status & EESR_TABT) /* Transmit Abort int */
607 mdp->stats.tx_aborted_errors++;
610 if (intr_status & EESR_RABT) {
611 /* Receive Abort int */
612 if (intr_status & EESR_RFRMER) {
613 /* Receive Frame Overflow int */
614 mdp->stats.rx_frame_errors++;
615 printk(KERN_ERR "Receive Frame Overflow\n");
618 #if !defined(CONFIG_CPU_SUBTYPE_SH7763)
619 if (intr_status & EESR_ADE) {
620 if (intr_status & EESR_TDE) {
621 if (intr_status & EESR_TFE)
622 mdp->stats.tx_fifo_errors++;
627 if (intr_status & EESR_RDE) {
628 /* Receive Descriptor Empty int */
629 mdp->stats.rx_over_errors++;
631 if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
632 ctrl_outl(EDRRR_R, ioaddr + EDRRR);
633 printk(KERN_ERR "Receive Descriptor Empty\n");
635 if (intr_status & EESR_RFE) {
636 /* Receive FIFO Overflow int */
637 mdp->stats.rx_fifo_errors++;
638 printk(KERN_ERR "Receive FIFO Overflow\n");
640 if (intr_status & (EESR_TWB | EESR_TABT |
641 #if !defined(CONFIG_CPU_SUBTYPE_SH7763)
644 EESR_TDE | EESR_TFE)) {
646 u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
648 printk(KERN_ERR "%s:TX error. status=%8.8x cur_tx=%8.8x ",
649 ndev->name, intr_status, mdp->cur_tx);
650 printk(KERN_ERR "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
651 mdp->dirty_tx, (u32) ndev->state, edtrr);
652 /* dirty buffer free */
656 if (edtrr ^ EDTRR_TRNS) {
658 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
661 netif_wake_queue(ndev);
665 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
667 struct net_device *ndev = netdev;
668 struct sh_eth_private *mdp = netdev_priv(ndev);
669 u32 ioaddr, boguscnt = RX_RING_SIZE;
672 ioaddr = ndev->base_addr;
673 spin_lock(&mdp->lock);
675 /* Get interrupt status */
676 intr_status = ctrl_inl(ioaddr + EESR);
677 /* Clear interrupt */
678 ctrl_outl(intr_status, ioaddr + EESR);
680 if (intr_status & (EESR_FRC | /* Frame recv*/
681 EESR_RMAF | /* Multicast address recv */
682 EESR_RRF | /* Residual-bit frame recv */
683 EESR_RTLF | /* Long frame recv*/
684 EESR_RTSF | /* short frame recv */
685 EESR_PRE | /* PHY-LSI recv error */
686 EESR_CERF)){ /* recv frame CRC error */
691 if (intr_status & TX_CHECK) {
693 netif_wake_queue(ndev);
696 if (intr_status & EESR_ERR_CHECK)
697 sh_eth_error(ndev, intr_status);
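/*
 * Editorial note: the handler reads EESR once and write-clears it, then
 * dispatches: receive-related bits run sh_eth_rx(), the TX_CHECK bits reclaim
 * finished Tx descriptors and wake the queue, and EESR_ERR_CHECK bits go to
 * sh_eth_error() for recovery.
 */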
699 if (--boguscnt < 0) {
701 "%s: Too much work at interrupt, status=0x%4.4x.\n",
702 ndev->name, intr_status);
705 spin_unlock(&mdp->lock);
710 static void sh_eth_timer(unsigned long data)
712 struct net_device *ndev = (struct net_device *)data;
713 struct sh_eth_private *mdp = netdev_priv(ndev);
715 mod_timer(&mdp->timer, jiffies + (10 * HZ));
718 /* PHY state control function */
719 static void sh_eth_adjust_link(struct net_device *ndev)
721 struct sh_eth_private *mdp = netdev_priv(ndev);
722 struct phy_device *phydev = mdp->phydev;
723 u32 ioaddr = ndev->base_addr;
726 if (phydev->link != PHY_DOWN) {
727 if (phydev->duplex != mdp->duplex) {
729 mdp->duplex = phydev->duplex;
730 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
731 if (mdp->duplex) { /* FULL */
732 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM,
735 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM,
741 if (phydev->speed != mdp->speed) {
743 mdp->speed = phydev->speed;
744 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
745 switch (mdp->speed) {
746 case 10: /* 10BASE */
747 ctrl_outl(GECMR_10, ioaddr + GECMR); break;
748 case 100:/* 100BASE */
749 ctrl_outl(GECMR_100, ioaddr + GECMR); break;
750 case 1000: /* 1000BASE */
751 ctrl_outl(GECMR_1000, ioaddr + GECMR); break;
757 if (mdp->link == PHY_DOWN) {
758 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
759 | ECMR_DM, ioaddr + ECMR);
761 mdp->link = phydev->link;
763 } else if (mdp->link) {
765 mdp->link = PHY_DOWN;
771 phy_print_status(phydev);
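/*
 * Editorial note: sh_eth_adjust_link() is the callback handed to
 * phy_connect(); phylib invokes it whenever the negotiated link changes, and
 * the driver mirrors the new duplex into ECMR_DM and (on SH7763) the new
 * speed into GECMR.
 */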
774 /* PHY init function */
775 static int sh_eth_phy_init(struct net_device *ndev)
777 struct sh_eth_private *mdp = netdev_priv(ndev);
778 char phy_id[BUS_ID_SIZE];
779 struct phy_device *phydev = NULL;
781 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
782 mdp->mii_bus->id , mdp->phy_id);
784 mdp->link = PHY_DOWN;
788 /* Try connect to PHY */
789 phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
790 0, PHY_INTERFACE_MODE_MII);
791 if (IS_ERR(phydev)) {
792 dev_err(&ndev->dev, "phy_connect failed\n");
793 return PTR_ERR(phydev);
795 dev_info(&ndev->dev, "attached phy %i to driver %s\n",
796 phydev->addr, phydev->drv->name);
798 mdp->phydev = phydev;
803 /* PHY control start function */
804 static int sh_eth_phy_start(struct net_device *ndev)
806 struct sh_eth_private *mdp = netdev_priv(ndev);
809 ret = sh_eth_phy_init(ndev);
813 /* reset phy - this also wakes it from PDOWN */
814 phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
815 phy_start(mdp->phydev);
820 /* network device open function */
821 static int sh_eth_open(struct net_device *ndev)
824 struct sh_eth_private *mdp = netdev_priv(ndev);
826 ret = request_irq(ndev->irq, &sh_eth_interrupt, 0, ndev->name, ndev);
828 printk(KERN_ERR "Can not assign IRQ number to %s\n", CARDNAME);
833 ret = sh_eth_ring_init(ndev);
838 ret = sh_eth_dev_init(ndev);
842 /* PHY control start*/
843 ret = sh_eth_phy_start(ndev);
847 /* Set the timer to check for link beat. */
848 init_timer(&mdp->timer);
849 mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
850 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
855 free_irq(ndev->irq, ndev);
859 /* Timeout function */
860 static void sh_eth_tx_timeout(struct net_device *ndev)
862 struct sh_eth_private *mdp = netdev_priv(ndev);
863 u32 ioaddr = ndev->base_addr;
864 struct sh_eth_rxdesc *rxdesc;
867 netif_stop_queue(ndev);
869 /* print a warning message */
870 printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
871 " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));
873 /* tx_errors count up */
874 mdp->stats.tx_errors++;
877 del_timer_sync(&mdp->timer);
879 /* Free all the skbuffs in the Rx queue. */
880 for (i = 0; i < RX_RING_SIZE; i++) {
881 rxdesc = &mdp->rx_ring[i];
883 rxdesc->addr = 0xBADF00D0;
884 if (mdp->rx_skbuff[i])
885 dev_kfree_skb(mdp->rx_skbuff[i]);
886 mdp->rx_skbuff[i] = NULL;
888 for (i = 0; i < TX_RING_SIZE; i++) {
889 if (mdp->tx_skbuff[i])
890 dev_kfree_skb(mdp->tx_skbuff[i]);
891 mdp->tx_skbuff[i] = NULL;
895 sh_eth_dev_init(ndev);
898 mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
899 add_timer(&mdp->timer);
902 /* Packet transmit function */
903 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
905 struct sh_eth_private *mdp = netdev_priv(ndev);
906 struct sh_eth_txdesc *txdesc;
910 spin_lock_irqsave(&mdp->lock, flags);
911 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
912 if (!sh_eth_txfree(ndev)) {
913 netif_stop_queue(ndev);
914 spin_unlock_irqrestore(&mdp->lock, flags);
918 spin_unlock_irqrestore(&mdp->lock, flags);
920 entry = mdp->cur_tx % TX_RING_SIZE;
921 mdp->tx_skbuff[entry] = skb;
922 txdesc = &mdp->tx_ring[entry];
923 txdesc->addr = (u32)(skb->data);
925 swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
927 __flush_purge_region(skb->data, skb->len);
928 if (skb->len < ETHERSMALL)
929 txdesc->buffer_length = ETHERSMALL;
931 txdesc->buffer_length = skb->len;
933 if (entry >= TX_RING_SIZE - 1)
934 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
936 txdesc->status |= cpu_to_le32(TD_TACT);
940 if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
941 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
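/*
 * Editorial note on handing a frame to the hardware: the descriptor gets the
 * skb data address, the cache is written back with __flush_purge_region() so
 * the E-DMAC sees the payload, TD_TACT transfers ownership (TD_TDLE marks the
 * last ring entry), and EDTRR_TRNS is written only if the transmitter is
 * currently idle.
 */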
943 ndev->trans_start = jiffies;
948 /* device close function */
949 static int sh_eth_close(struct net_device *ndev)
951 struct sh_eth_private *mdp = netdev_priv(ndev);
952 u32 ioaddr = ndev->base_addr;
955 netif_stop_queue(ndev);
957 /* Disable interrupts by clearing the interrupt mask. */
958 ctrl_outl(0x0000, ioaddr + EESIPR);
960 /* Stop the chip's Tx and Rx processes. */
961 ctrl_outl(0, ioaddr + EDTRR);
962 ctrl_outl(0, ioaddr + EDRRR);
966 phy_stop(mdp->phydev);
967 phy_disconnect(mdp->phydev);
970 free_irq(ndev->irq, ndev);
972 del_timer_sync(&mdp->timer);
974 /* Free all the skbuffs in the Rx queue. */
975 sh_eth_ring_free(ndev);
977 /* free DMA buffer */
978 ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
979 dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
981 /* free DMA buffer */
982 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
983 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
988 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
990 struct sh_eth_private *mdp = netdev_priv(ndev);
991 u32 ioaddr = ndev->base_addr;
993 mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
994 ctrl_outl(0, ioaddr + TROCR); /* (write clear) */
995 mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
996 ctrl_outl(0, ioaddr + CDCR); /* (write clear) */
997 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
998 ctrl_outl(0, ioaddr + LCCR); /* (write clear) */
999 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
1000 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
1001 ctrl_outl(0, ioaddr + CERCR); /* (write clear) */
1002 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
1003 ctrl_outl(0, ioaddr + CEECR); /* (write clear) */
1005 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
1006 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
1011 /* ioctl to device function */
1012 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1015 struct sh_eth_private *mdp = netdev_priv(ndev);
1016 struct phy_device *phydev = mdp->phydev;
1018 if (!netif_running(ndev))
1024 return phy_mii_ioctl(phydev, if_mii(rq), cmd);
1028 /* Set the multicast reception mode */
1029 static void sh_eth_set_multicast_list(struct net_device *ndev)
1031 u32 ioaddr = ndev->base_addr;
1033 if (ndev->flags & IFF_PROMISC) {
1034 /* Set promiscuous. */
1035 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
1038 /* Normal, unicast/broadcast-only mode. */
1039 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
1044 /* SuperH's TSU register init function */
1045 static void sh_eth_tsu_init(u32 ioaddr)
1047 ctrl_outl(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
1048 ctrl_outl(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
1049 ctrl_outl(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
1050 ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
1051 ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
1052 ctrl_outl(0, ioaddr + TSU_PRISL0);
1053 ctrl_outl(0, ioaddr + TSU_PRISL1);
1054 ctrl_outl(0, ioaddr + TSU_FWSL0);
1055 ctrl_outl(0, ioaddr + TSU_FWSL1);
1056 ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
1057 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
1058 ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
1059 ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
1061 ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
1062 ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
1064 ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
1065 ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
1066 ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
1067 ctrl_outl(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
1068 ctrl_outl(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
1069 ctrl_outl(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
1070 ctrl_outl(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
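/*
 * Editorial note: the TSU block can forward frames between the two Ethernet
 * channels (0 and 1) and filter on CAM entries; this init leaves forwarding,
 * QTAG handling, the TSU interrupts and every CAM entry disabled, so each
 * port behaves as an ordinary independent interface.
 */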
1073 /* MDIO bus release function */
1074 static int sh_mdio_release(struct net_device *ndev)
1076 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
1078 /* unregister mdio bus */
1079 mdiobus_unregister(bus);
1081 /* remove mdio bus info from net_device */
1082 dev_set_drvdata(&ndev->dev, NULL);
1084 /* free bitbang info */
1085 free_mdio_bitbang(bus);
1090 /* MDIO bus init function */
1091 static int sh_mdio_init(struct net_device *ndev, int id)
1094 struct bb_info *bitbang;
1095 struct sh_eth_private *mdp = netdev_priv(ndev);
1097 /* create bit control struct for PHY */
1098 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
1105 bitbang->addr = ndev->base_addr + PIR;
1106 bitbang->mdi_msk = 0x08;
1107 bitbang->mdo_msk = 0x04;
1108 bitbang->mmd_msk = 0x02;/* MMD */
1109 bitbang->mdc_msk = 0x01;
1110 bitbang->ctrl.ops = &bb_ops;
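/*
 * Editorial note: the masks above select individual bits of the PIR register
 * at bitbang->addr: 0x01 drives MDC, 0x02 is the MMD direction bit, 0x04
 * drives MDO and 0x08 samples MDI, matching the callbacks registered in
 * bb_ops.
 */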
1112 /* MII controller setting */
1113 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
1114 if (!mdp->mii_bus) {
1116 goto out_free_bitbang;
1119 /* Hook up MII support for ethtool */
1120 mdp->mii_bus->name = "sh_mii";
1121 mdp->mii_bus->dev = &ndev->dev;
1122 mdp->mii_bus->id[0] = id;
1125 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1126 if (!mdp->mii_bus->irq) {
1131 for (i = 0; i < PHY_MAX_ADDR; i++)
1132 mdp->mii_bus->irq[i] = PHY_POLL;
1134 /* register mdio bus */
1135 ret = mdiobus_register(mdp->mii_bus);
1139 dev_set_drvdata(&ndev->dev, mdp->mii_bus);
1144 kfree(mdp->mii_bus->irq);
1147 kfree(mdp->mii_bus);
1156 static int sh_eth_drv_probe(struct platform_device *pdev)
1158 int ret, i, devno = 0;
1159 struct resource *res;
1160 struct net_device *ndev = NULL;
1161 struct sh_eth_private *mdp;
1164 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1165 if (unlikely(res == NULL)) {
1166 dev_err(&pdev->dev, "invalid resource\n");
1171 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
1173 printk(KERN_ERR "%s: could not allocate device.\n", CARDNAME);
1178 /* The sh Ether-specific entries in the device structure. */
1179 ndev->base_addr = res->start;
1185 ndev->irq = platform_get_irq(pdev, 0);
1186 if (ndev->irq < 0) {
1191 SET_NETDEV_DEV(ndev, &pdev->dev);
1193 /* Fill in the fields of the device structure with ethernet values. */
1196 mdp = netdev_priv(ndev);
1197 spin_lock_init(&mdp->lock);
1200 mdp->phy_id = (int)pdev->dev.platform_data;
1203 ndev->open = sh_eth_open;
1204 ndev->hard_start_xmit = sh_eth_start_xmit;
1205 ndev->stop = sh_eth_close;
1206 ndev->get_stats = sh_eth_get_stats;
1207 ndev->set_multicast_list = sh_eth_set_multicast_list;
1208 ndev->do_ioctl = sh_eth_do_ioctl;
1209 ndev->tx_timeout = sh_eth_tx_timeout;
1210 ndev->watchdog_timeo = TX_TIMEOUT;
1212 mdp->post_rx = POST_RX >> (devno << 1);
1213 mdp->post_fw = POST_FW >> (devno << 1);
1215 /* read and set MAC address */
1216 read_mac_address(ndev);
1218 /* First device only init */
1221 ctrl_outl(ARSTR_ARSTR, ARSTR);
1224 /* TSU init (Init only)*/
1225 sh_eth_tsu_init(SH_TSU_ADDR);
1228 /* network device register */
1229 ret = register_netdev(ndev);
1234 ret = sh_mdio_init(ndev, pdev->id);
1236 goto out_unregister;
1238 /* print device information */
1239 printk(KERN_INFO "%s: %s at 0x%x, ",
1240 ndev->name, CARDNAME, (u32) ndev->base_addr);
1242 for (i = 0; i < 5; i++)
1243 printk(KERN_INFO "%02X:", ndev->dev_addr[i]);
1244 printk(KERN_INFO "%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
1246 platform_set_drvdata(pdev, ndev);
1251 unregister_netdev(ndev);
1262 static int sh_eth_drv_remove(struct platform_device *pdev)
1264 struct net_device *ndev = platform_get_drvdata(pdev);
1266 sh_mdio_release(ndev);
1267 unregister_netdev(ndev);
1268 flush_scheduled_work();
1271 platform_set_drvdata(pdev, NULL);
1276 static struct platform_driver sh_eth_driver = {
1277 .probe = sh_eth_drv_probe,
1278 .remove = sh_eth_drv_remove,
1284 static int __init sh_eth_init(void)
1286 return platform_driver_register(&sh_eth_driver);
1289 static void __exit sh_eth_cleanup(void)
1291 platform_driver_unregister(&sh_eth_driver);
1294 module_init(sh_eth_init);
1295 module_exit(sh_eth_cleanup);
1297 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
1298 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
1299 MODULE_LICENSE("GPL v2");