1 /*
2  * Blackfin On-Chip MAC Driver
3  *
4  * Copyright 2004-2007 Analog Devices Inc.
5  *
6  * Enter bugs at http://blackfin.uclinux.org/
7  *
8  * Licensed under the GPL-2 or later.
9  */
10
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/delay.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/irq.h>
20 #include <linux/io.h>
21 #include <linux/ioport.h>
22 #include <linux/crc32.h>
23 #include <linux/device.h>
24 #include <linux/spinlock.h>
25 #include <linux/ethtool.h>
26 #include <linux/mii.h>
27 #include <linux/phy.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/skbuff.h>
31 #include <linux/platform_device.h>
32
33 #include <asm/dma.h>
34 #include <linux/dma-mapping.h>
35
36 #include <asm/blackfin.h>
37 #include <asm/cacheflush.h>
38 #include <asm/portmux.h>
39
40 #include "bfin_mac.h"
41
42 #define DRV_NAME        "bfin_mac"
43 #define DRV_VERSION     "1.1"
44 #define DRV_AUTHOR      "Bryan Wu, Luke Yang"
45 #define DRV_DESC        "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver"
46
47 MODULE_AUTHOR(DRV_AUTHOR);
48 MODULE_LICENSE("GPL");
49 MODULE_DESCRIPTION(DRV_DESC);
50
51 #if defined(CONFIG_BFIN_MAC_USE_L1)
52 # define bfin_mac_alloc(dma_handle, size)  l1_data_sram_zalloc(size)
53 # define bfin_mac_free(dma_handle, ptr)    l1_data_sram_free(ptr)
54 #else
55 # define bfin_mac_alloc(dma_handle, size) \
56         dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
57 # define bfin_mac_free(dma_handle, ptr) \
58         dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
59 #endif
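/*
 * DMA descriptor memory comes either from on-chip L1 data SRAM or from
 * ordinary DMA-coherent memory, depending on CONFIG_BFIN_MAC_USE_L1.
 */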
60
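/*
 * Receive buffer allocation size: comfortably larger than a maximum-size
 * Ethernet frame (1518 bytes including the FCS), with room left for the
 * 2-byte RXDWA alignment padding.
 */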
61 #define PKT_BUF_SZ 1580
62
63 #define MAX_TIMEOUT_CNT 500
64
65 /* pointers to maintain the transmit and receive lists */
66 static struct net_dma_desc_tx *tx_list_head;
67 static struct net_dma_desc_tx *tx_list_tail;
68 static struct net_dma_desc_rx *rx_list_head;
69 static struct net_dma_desc_rx *rx_list_tail;
70 static struct net_dma_desc_rx *current_rx_ptr;
71 static struct net_dma_desc_tx *current_tx_ptr;
72 static struct net_dma_desc_tx *tx_desc;
73 static struct net_dma_desc_rx *rx_desc;
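/*
 * Both the TX and RX lists are built as circular rings in desc_list_init():
 * each descriptor's desc_b.next_dma_desc points at the next descriptor's
 * desc_a, so the DMA engine can walk the ring on its own in "large
 * descriptor" flow mode.
 */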
74
75 static void bf537mac_disable(void);
76 static void bf537mac_enable(void);
77
78 static void desc_list_free(void)
79 {
80         struct net_dma_desc_rx *r;
81         struct net_dma_desc_tx *t;
82         int i;
83 #if !defined(CONFIG_BFIN_MAC_USE_L1)
84         dma_addr_t dma_handle = 0;
85 #endif
86
87         if (tx_desc) {
88                 t = tx_list_head;
89                 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
90                         if (t) {
91                                 if (t->skb) {
92                                         dev_kfree_skb(t->skb);
93                                         t->skb = NULL;
94                                 }
95                                 t = t->next;
96                         }
97                 }
98                 bfin_mac_free(dma_handle, tx_desc);
99         }
100
101         if (rx_desc) {
102                 r = rx_list_head;
103                 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
104                         if (r) {
105                                 if (r->skb) {
106                                         dev_kfree_skb(r->skb);
107                                         r->skb = NULL;
108                                 }
109                                 r = r->next;
110                         }
111                 }
112                 bfin_mac_free(dma_handle, rx_desc);
113         }
114 }
115
116 static int desc_list_init(void)
117 {
118         int i;
119         struct sk_buff *new_skb;
120 #if !defined(CONFIG_BFIN_MAC_USE_L1)
121         /*
122          * On Blackfin, dma_alloc_coherent() ignores this dma_handle;
123          * the real DMA handle is its return value.
124          */
125         dma_addr_t dma_handle;
126 #endif
127
128         tx_desc = bfin_mac_alloc(&dma_handle,
129                                 sizeof(struct net_dma_desc_tx) *
130                                 CONFIG_BFIN_TX_DESC_NUM);
131         if (tx_desc == NULL)
132                 goto init_error;
133
134         rx_desc = bfin_mac_alloc(&dma_handle,
135                                 sizeof(struct net_dma_desc_rx) *
136                                 CONFIG_BFIN_RX_DESC_NUM);
137         if (rx_desc == NULL)
138                 goto init_error;
139
140         /* init tx_list */
141         tx_list_head = tx_list_tail = tx_desc;
142
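        /*
         * Each transmit slot uses a pair of DMA descriptors: desc_a feeds
         * the frame data to the EMAC, and desc_b writes the hardware status
         * word back into the net_dma_desc_tx, which is how the driver later
         * detects that the frame has gone out.
         */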
143         for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
144                 struct net_dma_desc_tx *t = tx_desc + i;
145                 struct dma_descriptor *a = &(t->desc_a);
146                 struct dma_descriptor *b = &(t->desc_b);
147
148                 /*
149                  * DMA disabled for now
150                  * read from memory (WNR = 0)
151                  * word size is 32 bits
152                  * descriptor size is 6 half-words
153                  * large descriptor flow
154                  */
155                 a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
156                 a->start_addr = (unsigned long)t->packet;
157                 a->x_count = 0;
158                 a->next_dma_desc = b;
159
160                 /*
161                  * DMA enabled
162                  * write to memory (WNR = 1)
163                  * word size is 32 bits
164                  * interrupt disabled
165                  * descriptor size is 6 half-words
166                  * large descriptor flow
167                  */
168                 b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
169                 b->start_addr = (unsigned long)(&(t->status));
170                 b->x_count = 0;
171
172                 t->skb = NULL;
173                 tx_list_tail->desc_b.next_dma_desc = a;
174                 tx_list_tail->next = t;
175                 tx_list_tail = t;
176         }
177         tx_list_tail->next = tx_list_head;      /* tx_list is a circle */
178         tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
179         current_tx_ptr = tx_list_head;
180
181         /* init rx_list */
182         rx_list_head = rx_list_tail = rx_desc;
183
184         for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
185                 struct net_dma_desc_rx *r = rx_desc + i;
186                 struct dma_descriptor *a = &(r->desc_a);
187                 struct dma_descriptor *b = &(r->desc_b);
188
189                 /* allocate an skb for this receive descriptor */
190                 new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
191                 if (!new_skb) {
192                         printk(KERN_NOTICE DRV_NAME
193                                ": init: low on mem - packet dropped\n");
194                         goto init_error;
195                 }
196                 skb_reserve(new_skb, 2);
197                 r->skb = new_skb;
198
199                 /*
200                  * DMA enabled
201                  * write to memory (WNR = 1)
202                  * word size is 32 bits
203                  * interrupt disabled
204                  * descriptor size is 6 half-words
205                  * large descriptor flow
206                  */
207                 a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
208                 /* since RXDWA is enabled */
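                /*
                 * The MAC prepends 2 bytes of padding to each received
                 * frame, so the DMA is started 2 bytes before skb->data:
                 * the padding lands in the headroom reserved above and the
                 * IP header stays 32-bit aligned.
                 */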
209                 a->start_addr = (unsigned long)new_skb->data - 2;
210                 a->x_count = 0;
211                 a->next_dma_desc = b;
212
213                 /*
214                  * DMA enabled
215                  * write to memory (WNR = 1)
216                  * word size is 32 bits
217                  * interrupt enabled
218                  * descriptor size is 6 half-words
219                  * large descriptor flow
220                  */
221                 b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
222                                 NDSIZE_6 | DMAFLOW_LARGE;
223                 b->start_addr = (unsigned long)(&(r->status));
224                 b->x_count = 0;
225
226                 rx_list_tail->desc_b.next_dma_desc = a;
227                 rx_list_tail->next = r;
228                 rx_list_tail = r;
229         }
230         rx_list_tail->next = rx_list_head;      /* rx_list is a circle */
231         rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
232         current_rx_ptr = rx_list_head;
233
234         return 0;
235
236 init_error:
237         desc_list_free();
238         printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
239         return -ENOMEM;
240 }
241
242
243 /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
244
245 /* Set FER regs to MUX in Ethernet pins */
246 static int setup_pin_mux(int action)
247 {
248 #if defined(CONFIG_BFIN_MAC_RMII)
249         u16 pin_req[] = P_RMII0;
250 #else
251         u16 pin_req[] = P_MII0;
252 #endif
253
254         if (action) {
255                 if (peripheral_request_list(pin_req, DRV_NAME)) {
256                         printk(KERN_ERR DRV_NAME
257                         ": Requesting Peripherals failed\n");
258                         return -EFAULT;
259                 }
260         } else
261                 peripheral_free_list(pin_req);
262
263         return 0;
264 }
265
266 /*
267  * MII operations
268  */
269 /* Wait until the previous MDC/MDIO transaction has completed */
270 static void mdio_poll(void)
271 {
272         int timeout_cnt = MAX_TIMEOUT_CNT;
273
274         /* poll the STABUSY bit */
275         while ((bfin_read_EMAC_STAADD()) & STABUSY) {
276                 udelay(1);
277                 if (timeout_cnt-- < 0) {
278                         printk(KERN_ERR DRV_NAME
279                         ": timeout waiting for MDC/MDIO transaction to complete\n");
280                         break;
281                 }
282         }
283 }
284
285 /* Read an off-chip register in a PHY through the MDC/MDIO port */
286 static int mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
287 {
288         mdio_poll();
289
290         /* read mode */
291         bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
292                                 SET_REGAD((u16) regnum) |
293                                 STABUSY);
294
295         mdio_poll();
296
297         return (int) bfin_read_EMAC_STADAT();
298 }
299
300 /* Write an off-chip register in a PHY through the MDC/MDIO port */
301 static int mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
302                          u16 value)
303 {
304         mdio_poll();
305
306         bfin_write_EMAC_STADAT((u32) value);
307
308         /* write mode */
309         bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
310                                 SET_REGAD((u16) regnum) |
311                                 STAOP |
312                                 STABUSY);
313
314         mdio_poll();
315
316         return 0;
317 }
318
319 static int mdiobus_reset(struct mii_bus *bus)
320 {
321         return 0;
322 }
323
324 static void bf537_adjust_link(struct net_device *dev)
325 {
326         struct bf537mac_local *lp = netdev_priv(dev);
327         struct phy_device *phydev = lp->phydev;
328         unsigned long flags;
329         int new_state = 0;
330
331         spin_lock_irqsave(&lp->lock, flags);
332         if (phydev->link) {
333                 /* Now we make sure that we can be in full duplex mode.
334                  * If not, we operate in half-duplex mode. */
335                 if (phydev->duplex != lp->old_duplex) {
336                         u32 opmode = bfin_read_EMAC_OPMODE();
337                         new_state = 1;
338
339                         if (phydev->duplex)
340                                 opmode |= FDMODE;
341                         else
342                                 opmode &= ~(FDMODE);
343
344                         bfin_write_EMAC_OPMODE(opmode);
345                         lp->old_duplex = phydev->duplex;
346                 }
347
348                 if (phydev->speed != lp->old_speed) {
349 #if defined(CONFIG_BFIN_MAC_RMII)
350                         u32 opmode = bfin_read_EMAC_OPMODE();
351                         switch (phydev->speed) {
352                         case 10:
353                                 opmode |= RMII_10;
354                                 break;
355                         case 100:
356                                 opmode &= ~(RMII_10);
357                                 break;
358                         default:
359                                 printk(KERN_WARNING
360                                         "%s: Ack!  Speed (%d) is not 10/100!\n",
361                                         DRV_NAME, phydev->speed);
362                                 break;
363                         }
364                         bfin_write_EMAC_OPMODE(opmode);
365 #endif
366
367                         new_state = 1;
368                         lp->old_speed = phydev->speed;
369                 }
370
371                 if (!lp->old_link) {
372                         new_state = 1;
373                         lp->old_link = 1;
374                         netif_schedule(dev);
375                 }
376         } else if (lp->old_link) {
377                 new_state = 1;
378                 lp->old_link = 0;
379                 lp->old_speed = 0;
380                 lp->old_duplex = -1;
381         }
382
383         if (new_state) {
384                 u32 opmode = bfin_read_EMAC_OPMODE();
385                 phy_print_status(phydev);
386                 pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
387         }
388
389         spin_unlock_irqrestore(&lp->lock, flags);
390 }
391
392 /* MDC  = 2.5 MHz */
393 #define MDC_CLK 2500000
394
395 static int mii_probe(struct net_device *dev)
396 {
397         struct bf537mac_local *lp = netdev_priv(dev);
398         struct phy_device *phydev = NULL;
399         unsigned short sysctl;
400         int i;
401         u32 sclk, mdc_div;
402
403         /* Enable PHY output early */
404         if (!(bfin_read_VR_CTL() & PHYCLKOE))
405                 bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
406
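        /*
         * The MDC management clock is derived from the system clock as
         * SCLK / (2 * (mdc_div + 1)), so this divider targets an MDC of
         * roughly MDC_CLK (2.5 MHz).
         */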
407         sclk = get_sclk();
408         mdc_div = ((sclk / MDC_CLK) / 2) - 1;
409
410         sysctl = bfin_read_EMAC_SYSCTL();
411         sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
412         bfin_write_EMAC_SYSCTL(sysctl);
413
414         /* search for a connected PHY device */
415         for (i = 0; i < PHY_MAX_ADDR; i++) {
416                 struct phy_device *const tmp_phydev = lp->mii_bus.phy_map[i];
417
418                 if (!tmp_phydev)
419                         continue; /* no PHY here... */
420
421                 phydev = tmp_phydev;
422                 break; /* found it */
423         }
424
425         /* now we should have a proper phydev to attach to... */
426         if (!phydev) {
427                 printk(KERN_INFO "%s: no PHY device found at all\n",
428                         dev->name);
429                 return -ENODEV;
430         }
431
432 #if defined(CONFIG_BFIN_MAC_RMII)
433         phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0,
434                         PHY_INTERFACE_MODE_RMII);
435 #else
436         phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0,
437                         PHY_INTERFACE_MODE_MII);
438 #endif
439
440         if (IS_ERR(phydev)) {
441                 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
442                 return PTR_ERR(phydev);
443         }
444
445         /* mask with MAC supported features */
446         phydev->supported &= (SUPPORTED_10baseT_Half
447                               | SUPPORTED_10baseT_Full
448                               | SUPPORTED_100baseT_Half
449                               | SUPPORTED_100baseT_Full
450                               | SUPPORTED_Autoneg
451                               | SUPPORTED_Pause | SUPPORTED_Asym_Pause
452                               | SUPPORTED_MII
453                               | SUPPORTED_TP);
454
455         phydev->advertising = phydev->supported;
456
457         lp->old_link = 0;
458         lp->old_speed = 0;
459         lp->old_duplex = -1;
460         lp->phydev = phydev;
461
462         printk(KERN_INFO "%s: attached PHY driver [%s] "
463                "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
464                "@sclk=%dMHz)\n",
465                DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq,
466                MDC_CLK, mdc_div, sclk/1000000);
467
468         return 0;
469 }
470
471 /**************************************************************************/
472 void setup_system_regs(struct net_device *dev)
473 {
474         unsigned short sysctl;
475
476         /*
477          * Odd word alignment for Receive Frame DMA word
478          * Configure checksum support and receive frame word alignment
479          */
480         sysctl = bfin_read_EMAC_SYSCTL();
481 #if defined(BFIN_MAC_CSUM_OFFLOAD)
482         sysctl |= RXDWA | RXCKS;
483 #else
484         sysctl |= RXDWA;
485 #endif
486         bfin_write_EMAC_SYSCTL(sysctl);
487
488         bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
489
490         /* Initialize the TX DMA channel registers */
491         bfin_write_DMA2_X_COUNT(0);
492         bfin_write_DMA2_X_MODIFY(4);
493         bfin_write_DMA2_Y_COUNT(0);
494         bfin_write_DMA2_Y_MODIFY(0);
495
496         /* Initialize the RX DMA channel registers */
497         bfin_write_DMA1_X_COUNT(0);
498         bfin_write_DMA1_X_MODIFY(4);
499         bfin_write_DMA1_Y_COUNT(0);
500         bfin_write_DMA1_Y_MODIFY(0);
501 }
502
503 static void setup_mac_addr(u8 *mac_addr)
504 {
505         u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
506         u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);
507
508         /* this depends on a little-endian machine */
509         bfin_write_EMAC_ADDRLO(addr_low);
510         bfin_write_EMAC_ADDRHI(addr_hi);
511 }
512
513 static int bf537mac_set_mac_address(struct net_device *dev, void *p)
514 {
515         struct sockaddr *addr = p;
516         if (netif_running(dev))
517                 return -EBUSY;
518         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
519         setup_mac_addr(dev->dev_addr);
520         return 0;
521 }
522
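/*
 * Reclaim transmitted buffers: free the skb of every descriptor from
 * tx_list_head onward whose status word the DMA has written back, stopping
 * before current_tx_ptr.  If the ring is nearly full, briefly busy-wait for
 * the head descriptor to complete before reclaiming it.
 */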
523 static void adjust_tx_list(void)
524 {
525         int timeout_cnt = MAX_TIMEOUT_CNT;
526
527         if (tx_list_head->status.status_word != 0
528             && current_tx_ptr != tx_list_head) {
529                 goto adjust_head;       /* a descriptor completed, reclaim it */
530         }
531
532         /*
533          * If nothing was released, check the wait condition:
534          * the descriptor after current must not be the head,
535          * otherwise the DMA will not stop where we want it to.
536          */
537         if (current_tx_ptr->next->next == tx_list_head) {
538                 while (tx_list_head->status.status_word == 0) {
539                         mdelay(1);
540                         if (tx_list_head->status.status_word != 0
541                             || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
542                                 goto adjust_head;
543                         }
544                         if (timeout_cnt-- < 0) {
545                                 printk(KERN_ERR DRV_NAME
546                                 ": timeout waiting to adjust tx list head\n");
547                                 break;
548                         }
549                 }
550                 if (tx_list_head->status.status_word != 0) {
551                         goto adjust_head;
552                 }
553         }
554
555         return;
556
557 adjust_head:
558         do {
559                 tx_list_head->desc_a.config &= ~DMAEN;
560                 tx_list_head->status.status_word = 0;
561                 if (tx_list_head->skb) {
562                         dev_kfree_skb(tx_list_head->skb);
563                         tx_list_head->skb = NULL;
564                 } else {
565                         printk(KERN_ERR DRV_NAME
566                                ": no sk_buff in a transmitted frame!\n");
567                 }
568                 tx_list_head = tx_list_head->next;
569         } while (tx_list_head->status.status_word != 0
570                  && current_tx_ptr != tx_list_head);
571         return;
572
573 }
574
575 static int bf537mac_hard_start_xmit(struct sk_buff *skb,
576                                 struct net_device *dev)
577 {
578         unsigned int data;
579
580         current_tx_ptr->skb = skb;
581
582         /*
583          * Is skb->data always 16-bit aligned?
584          * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)?
585          */
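        /*
         * The TX DMA expects each frame to be preceded by a 16-bit length
         * word.  If skb->data is 16-bit but not 32-bit aligned, the length
         * is written into the two bytes just in front of it and the buffer
         * is sent in place (the resulting start address is 32-bit aligned
         * for the DMA); otherwise the frame is copied into the descriptor's
         * own packet buffer behind its length word.
         */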
586         if ((((unsigned int)(skb->data)) & 0x02) == 2) {
587                 /* move skb->data to current_tx_ptr payload */
588                 data = (unsigned int)(skb->data) - 2;
589                 *((unsigned short *)data) = (unsigned short)(skb->len);
590                 current_tx_ptr->desc_a.start_addr = (unsigned long)data;
591                 /* this is important! */
592                 blackfin_dcache_flush_range(data, (data + (skb->len)) + 2);
593
594         } else {
595                 *((unsigned short *)(current_tx_ptr->packet)) =
596                     (unsigned short)(skb->len);
597                 memcpy((char *)(current_tx_ptr->packet + 2), skb->data,
598                        (skb->len));
599                 current_tx_ptr->desc_a.start_addr =
600                     (unsigned long)current_tx_ptr->packet;
601                 if (current_tx_ptr->status.status_word != 0)
602                         current_tx_ptr->status.status_word = 0;
603                 blackfin_dcache_flush_range((unsigned int)current_tx_ptr->
604                                             packet,
605                                             (unsigned int)(current_tx_ptr->
606                                                            packet + skb->len) +
607                                             2);
608         }
609
610         /* enable this packet's dma */
611         current_tx_ptr->desc_a.config |= DMAEN;
612
613         /* TX DMA is already running; it will fetch this descriptor itself */
614         if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
615                 goto out;
616
617         /* tx dma is not running */
618         bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
619         /* dma enabled, read from memory, size is 6 */
620         bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
621         /* Turn on the EMAC tx */
622         bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
623
624 out:
625         adjust_tx_list();
626         current_tx_ptr = current_tx_ptr->next;
627         dev->trans_start = jiffies;
628         dev->stats.tx_packets++;
629         dev->stats.tx_bytes += (skb->len);
630         return 0;
631 }
632
633 static void bf537mac_rx(struct net_device *dev)
634 {
635         struct sk_buff *skb, *new_skb;
636         unsigned short len;
637
638         /* grab the current skb and allocate a replacement for the next receive */
639         skb = current_rx_ptr->skb;
640         new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
641         if (!new_skb) {
642                 printk(KERN_NOTICE DRV_NAME
643                        ": rx: low on mem - packet dropped\n");
644                 dev->stats.rx_dropped++;
645                 goto out;
646         }
647         /* reserve 2 bytes for RXDWA padding */
648         skb_reserve(new_skb, 2);
649         current_rx_ptr->skb = new_skb;
650         current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
651
652         /* Invalidate the data cache over the skb->data range when it is a
653          * write-back cache, so stale lines cannot overwrite the DMA'd data
654          */
655         blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
656                                          (unsigned long)new_skb->end);
657
658         len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
659         skb_put(skb, len);
660         blackfin_dcache_invalidate_range((unsigned long)skb->head,
661                                          (unsigned long)skb->tail);
662
663         dev->last_rx = jiffies;
664         skb->dev = dev;
665         skb->protocol = eth_type_trans(skb, dev);
666 #if defined(BFIN_MAC_CSUM_OFFLOAD)
667         skb->csum = current_rx_ptr->status.ip_payload_csum;
668         skb->ip_summed = CHECKSUM_COMPLETE;
669 #endif
670
671         netif_rx(skb);
672         dev->stats.rx_packets++;
673         dev->stats.rx_bytes += len;
674         current_rx_ptr->status.status_word = 0x00000000;
675         current_rx_ptr = current_rx_ptr->next;
676
677 out:
678         return;
679 }
680
681 /* interrupt routine to handle rx and error signals */
682 static irqreturn_t bf537mac_interrupt(int irq, void *dev_id)
683 {
684         struct net_device *dev = dev_id;
685         int number = 0;
686
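        /*
         * Drain every RX descriptor whose status word the DMA has already
         * written back, then acknowledge the DMA interrupt.
         */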
687 get_one_packet:
688         if (current_rx_ptr->status.status_word == 0) {
689                 /* no more new packets received */
690                 if (number == 0) {
691                         if (current_rx_ptr->next->status.status_word != 0) {
692                                 current_rx_ptr = current_rx_ptr->next;
693                                 goto real_rx;
694                         }
695                 }
696                 bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
697                                            DMA_DONE | DMA_ERR);
698                 return IRQ_HANDLED;
699         }
700
701 real_rx:
702         bf537mac_rx(dev);
703         number++;
704         goto get_one_packet;
705 }
706
707 #ifdef CONFIG_NET_POLL_CONTROLLER
708 static void bf537mac_poll(struct net_device *dev)
709 {
710         disable_irq(IRQ_MAC_RX);
711         bf537mac_interrupt(IRQ_MAC_RX, dev);
712         enable_irq(IRQ_MAC_RX);
713 }
714 #endif                          /* CONFIG_NET_POLL_CONTROLLER */
715
716 static void bf537mac_disable(void)
717 {
718         unsigned int opmode;
719
720         opmode = bfin_read_EMAC_OPMODE();
721         opmode &= (~RE);
722         opmode &= (~TE);
723         /* Turn off the EMAC */
724         bfin_write_EMAC_OPMODE(opmode);
725 }
726
727 /*
728  * Enable Interrupts, Receive, and Transmit
729  */
730 static void bf537mac_enable(void)
731 {
732         u32 opmode;
733
734         pr_debug("%s: %s\n", DRV_NAME, __FUNCTION__);
735
736         /* Set RX DMA */
737         bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
738         bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
739
740         /* Wait for any pending MII transaction to finish */
741         mdio_poll();
742
743         /* We enable only RX here */
744         /* ASTP   : Enable Automatic Pad Stripping
745            PR     : Promiscuous Mode for test
746            PSF    : Receive frames with total length less than 64 bytes.
747            FDMODE : Full Duplex Mode
748            LB     : Internal Loopback for test
749            RE     : Receiver Enable */
750         opmode = bfin_read_EMAC_OPMODE();
751         if (opmode & FDMODE)
752                 opmode |= PSF;
753         else
754                 opmode |= DRO | DC | PSF;
755         opmode |= RE;
756
757 #if defined(CONFIG_BFIN_MAC_RMII)
758         opmode |= RMII; /* for now, only 100 Mbit/s is supported */
759 #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
760         opmode |= TE;
761 #endif
762 #endif
763         /* Turn on the EMAC rx */
764         bfin_write_EMAC_OPMODE(opmode);
765 }
766
767 /* Our watchdog timed out. Called by the networking layer */
768 static void bf537mac_timeout(struct net_device *dev)
769 {
770         pr_debug("%s: %s\n", dev->name, __FUNCTION__);
771
772         bf537mac_disable();
773
774         /* reset tx queue */
775         tx_list_tail = tx_list_head->next;
776
777         bf537mac_enable();
778
779         /* We can accept TX packets again */
780         dev->trans_start = jiffies;
781         netif_wake_queue(dev);
782 }
783
784 static void bf537mac_multicast_hash(struct net_device *dev)
785 {
786         u32 emac_hashhi, emac_hashlo;
787         struct dev_mc_list *dmi = dev->mc_list;
788         char *addrs;
789         int i;
790         u32 crc;
791
792         emac_hashhi = emac_hashlo = 0;
793
794         for (i = 0; i < dev->mc_count; i++) {
795                 addrs = dmi->dmi_addr;
796                 dmi = dmi->next;
797
798                 /* skip non-multicast addresses */
799                 if (!(*addrs & 1))
800                         continue;
801
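                /*
                 * Hash on the top 6 bits of the Ethernet CRC: bit 5 selects
                 * HASHHI vs HASHLO, and the low 5 bits select the bit within
                 * that register.
                 */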
802                 crc = ether_crc(ETH_ALEN, addrs);
803                 crc >>= 26;
804
805                 if (crc & 0x20)
806                         emac_hashhi |= 1 << (crc & 0x1f);
807                 else
808                         emac_hashlo |= 1 << (crc & 0x1f);
809         }
810
811         bfin_write_EMAC_HASHHI(emac_hashhi);
812         bfin_write_EMAC_HASHLO(emac_hashlo);
813
814         return;
815 }
816
817 /*
818  * This routine will, depending on the values passed to it,
819  * either make it accept multicast packets, go into
820  * promiscuous mode (for TCPDUMP and cousins) or accept
821  * a select set of multicast packets
822  */
823 static void bf537mac_set_multicast_list(struct net_device *dev)
824 {
825         u32 sysctl;
826
827         if (dev->flags & IFF_PROMISC) {
828                 printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
829                 sysctl = bfin_read_EMAC_OPMODE();
830                 sysctl |= RAF;
831                 bfin_write_EMAC_OPMODE(sysctl);
832         } else if (dev->flags & IFF_ALLMULTI) {
833                 /* accept all multicast */
834                 sysctl = bfin_read_EMAC_OPMODE();
835                 sysctl |= PAM;
836                 bfin_write_EMAC_OPMODE(sysctl);
837         } else if (dev->mc_count) {
838                 /* set up multicast hash table */
839                 sysctl = bfin_read_EMAC_OPMODE();
840                 sysctl |= HM;
841                 bfin_write_EMAC_OPMODE(sysctl);
842                 bf537mac_multicast_hash(dev);
843         } else {
844                 /* clear promisc or multicast mode */
845                 sysctl = bfin_read_EMAC_OPMODE();
846                 sysctl &= ~(RAF | PAM);
847                 bfin_write_EMAC_OPMODE(sysctl);
848         }
849 }
850
851 /*
852  * this puts the device in an inactive state
853  */
854 static void bf537mac_shutdown(struct net_device *dev)
855 {
856         /* Turn off the EMAC */
857         bfin_write_EMAC_OPMODE(0x00000000);
858         /* Turn off the EMAC RX DMA */
859         bfin_write_DMA1_CONFIG(0x0000);
860         bfin_write_DMA2_CONFIG(0x0000);
861 }
862
863 /*
864  * Open and Initialize the interface
865  *
866  * Set up everything, reset the card, etc..
867  */
868 static int bf537mac_open(struct net_device *dev)
869 {
870         struct bf537mac_local *lp = netdev_priv(dev);
871         int retval;
872         pr_debug("%s: %s\n", dev->name, __FUNCTION__);
873
874         /*
875          * Check that the address is valid.  If it's not, refuse
876          * to bring the device up.  The user must specify an
877          * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
878          */
879         if (!is_valid_ether_addr(dev->dev_addr)) {
880                 printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
881                 return -EINVAL;
882         }
883
884         /* initialize the rx and tx descriptor lists */
885         retval = desc_list_init();
886
887         if (retval)
888                 return retval;
889
890         phy_start(lp->phydev);
891         phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
892         setup_system_regs(dev);
893         bf537mac_disable();
894         bf537mac_enable();
895         pr_debug("hardware init finished\n");
896         netif_start_queue(dev);
897         netif_carrier_on(dev);
898
899         return 0;
900 }
901
902 /*
903  *
904  * This makes the board clean up everything that it can
905  * and stop talking to the outside world.  Caused by
906  * an 'ifconfig ethX down'.
907  */
908 static int bf537mac_close(struct net_device *dev)
909 {
910         struct bf537mac_local *lp = netdev_priv(dev);
911         pr_debug("%s: %s\n", dev->name, __FUNCTION__);
912
913         netif_stop_queue(dev);
914         netif_carrier_off(dev);
915
916         phy_stop(lp->phydev);
917         phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
918
919         /* clear everything */
920         bf537mac_shutdown(dev);
921
922         /* free the rx/tx buffers */
923         desc_list_free();
924
925         return 0;
926 }
927
928 static int __init bf537mac_probe(struct net_device *dev)
929 {
930         struct bf537mac_local *lp = netdev_priv(dev);
931         int retval;
932         int i;
933
934         /* Grab the current MAC address from the MAC's address registers */
935         *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
936         *(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
937
938         /* probe mac */
939         /* TODO: how to probe properly? which register is the revision register? */
940         bfin_write_EMAC_ADDRLO(0x12345678);
941         if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
942                 pr_debug("can't detect bf537 mac!\n");
943                 retval = -ENODEV;
944                 goto err_out;
945         }
946
947         /* set the GPIO pins to Ethernet mode */
948         retval = setup_pin_mux(1);
949         if (retval)
950                 return retval;
951
952         /* Is it valid? (Did the bootloader initialize it?) */
953         if (!is_valid_ether_addr(dev->dev_addr)) {
954                 /* Grab the MAC from the board somehow - this is done in the
955                    arch/blackfin/mach-bf537/boards/eth_mac.c */
956                 bfin_get_ether_addr(dev->dev_addr);
957         }
958
959         /* If still not valid, get a random one */
960         if (!is_valid_ether_addr(dev->dev_addr)) {
961                 random_ether_addr(dev->dev_addr);
962         }
963
964         setup_mac_addr(dev->dev_addr);
965
966         /* MDIO bus initialization */
967         lp->mii_bus.priv = dev;
968         lp->mii_bus.read = mdiobus_read;
969         lp->mii_bus.write = mdiobus_write;
970         lp->mii_bus.reset = mdiobus_reset;
971         lp->mii_bus.name = "bfin_mac_mdio";
972         snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "0");
973         lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
974         for (i = 0; i < PHY_MAX_ADDR; ++i)
975                 lp->mii_bus.irq[i] = PHY_POLL;
976
977         mdiobus_register(&lp->mii_bus);
978
979         retval = mii_probe(dev);
980         if (retval)
981                 return retval;
982
983         /* Fill in the fields of the device structure with ethernet values. */
984         ether_setup(dev);
985
986         dev->open = bf537mac_open;
987         dev->stop = bf537mac_close;
988         dev->hard_start_xmit = bf537mac_hard_start_xmit;
989         dev->set_mac_address = bf537mac_set_mac_address;
990         dev->tx_timeout = bf537mac_timeout;
991         dev->set_multicast_list = bf537mac_set_multicast_list;
992 #ifdef CONFIG_NET_POLL_CONTROLLER
993         dev->poll_controller = bf537mac_poll;
994 #endif
995
996         spin_lock_init(&lp->lock);
997
998         /* now, enable interrupts */
999         /* register irq handler */
1000         if (request_irq
1001             (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
1002              "EMAC_RX", dev)) {
1003                 printk(KERN_WARNING DRV_NAME
1004                        ": Unable to attach BlackFin MAC RX interrupt\n");
1005                 return -EBUSY;
1006         }
1007
1008
1009         retval = register_netdev(dev);
1010         if (retval == 0) {
1011                 /* now, print out the card info in a short format */
1012                 printk(KERN_INFO "%s: Version %s, %s\n",
1013                          DRV_NAME, DRV_VERSION, DRV_DESC);
1014         }
1015
1016 err_out:
1017         return retval;
1018 }
1019
1020 static int bfin_mac_probe(struct platform_device *pdev)
1021 {
1022         struct net_device *ndev;
1023
1024         ndev = alloc_etherdev(sizeof(struct bf537mac_local));
1025         if (!ndev) {
1026                 printk(KERN_WARNING DRV_NAME ": could not allocate device\n");
1027                 return -ENOMEM;
1028         }
1029
1030         SET_NETDEV_DEV(ndev, &pdev->dev);
1031
1032         platform_set_drvdata(pdev, ndev);
1033
1034         if (bf537mac_probe(ndev) != 0) {
1035                 platform_set_drvdata(pdev, NULL);
1036                 free_netdev(ndev);
1037                 printk(KERN_WARNING DRV_NAME ": not found\n");
1038                 return -ENODEV;
1039         }
1040
1041         return 0;
1042 }
1043
1044 static int bfin_mac_remove(struct platform_device *pdev)
1045 {
1046         struct net_device *ndev = platform_get_drvdata(pdev);
1047
1048         platform_set_drvdata(pdev, NULL);
1049
1050         unregister_netdev(ndev);
1051
1052         free_irq(IRQ_MAC_RX, ndev);
1053
1054         free_netdev(ndev);
1055
1056         setup_pin_mux(0);
1057
1058         return 0;
1059 }
1060
1061 #ifdef CONFIG_PM
1062 static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
1063 {
1064         struct net_device *net_dev = platform_get_drvdata(pdev);
1065
1066         if (netif_running(net_dev))
1067                 bf537mac_close(net_dev);
1068
1069         return 0;
1070 }
1071
1072 static int bfin_mac_resume(struct platform_device *pdev)
1073 {
1074         struct net_device *net_dev = platform_get_drvdata(pdev);
1075
1076         if (netif_running(net_dev))
1077                 bf537mac_open(net_dev);
1078
1079         return 0;
1080 }
1081 #else
1082 #define bfin_mac_suspend NULL
1083 #define bfin_mac_resume NULL
1084 #endif  /* CONFIG_PM */
1085
1086 static struct platform_driver bfin_mac_driver = {
1087         .probe = bfin_mac_probe,
1088         .remove = bfin_mac_remove,
1089         .resume = bfin_mac_resume,
1090         .suspend = bfin_mac_suspend,
1091         .driver = {
1092                    .name = DRV_NAME,
1093                    },
1094 };
1095
1096 static int __init bfin_mac_init(void)
1097 {
1098         return platform_driver_register(&bfin_mac_driver);
1099 }
1100
1101 module_init(bfin_mac_init);
1102
1103 static void __exit bfin_mac_cleanup(void)
1104 {
1105         platform_driver_unregister(&bfin_mac_driver);
1106 }
1107
1108 module_exit(bfin_mac_cleanup);