/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <linux/inet_lro.h>

#include <asm/firmware.h>
#include <asm/pasemi_dma.h>

#include "pasemi_mac.h"
/* We have our own alignment, since ppc64 in general has it set to 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN	2
#define LRO_MAX_AGGR 64

#define PE_MAX_MTU	9000
#define PE_DEF_MTU	ETH_DATA_LEN

#define DEFAULT_MSG_ENABLE	  \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
	 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP | \
	 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;	/* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
extern const struct ethtool_ops pasemi_mac_ethtool_ops;
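
/* Report whether DMA address translation (the IOMMU) is active, so the
 * channel setup code knows whether to set the translation bits in the
 * DMA channel configuration registers.
 */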
static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	return 1;
#else
	return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}
static void write_iob_reg(unsigned int reg, unsigned int val)
        pasemi_write_iob_reg(reg, val);

static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg)
        return pasemi_read_mac_reg(mac->dma_if, reg);

static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg,
                          unsigned int val)
        pasemi_write_mac_reg(mac->dma_if, reg, val);

static unsigned int read_dma_reg(unsigned int reg)
        return pasemi_read_dma_reg(reg);

static void write_dma_reg(unsigned int reg, unsigned int val)
        pasemi_write_dma_reg(reg, val);

static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac)
        return mac->rx;

static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac)
        return mac->tx;

static inline void prefetch_skb(const struct sk_buff *skb)
static int mac_to_intf(struct pasemi_mac *mac)
        struct pci_dev *pdev = mac->pdev;
        int nintf, off, i, j;
        int devfn = pdev->devfn;

        tmp = read_dma_reg(PAS_DMA_CAP_IFI);
        nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S;
        off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S;

        /* IOFF contains the offset to the registers containing the
         * DMA interface-to-MAC-pci-id mappings, and NIN contains number
         * of total interfaces. Each register contains 4 devfns.
         * Just do a linear search until we find the devfn of the MAC
         * we're trying to look up.
         */
        for (i = 0; i < (nintf+3)/4; i++) {
                tmp = read_dma_reg(off+4*i);
                for (j = 0; j < 4; j++) {
                        if (((tmp >> (8*j)) & 0xff) == devfn)
static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
        flags &= ~PAS_MAC_CFG_PCFG_PE;
        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
        flags |= PAS_MAC_CFG_PCFG_PE;
        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
        struct pci_dev *pdev = mac->pdev;
        struct device_node *dn = pci_device_to_OF_node(pdev);

                        "No device node for mac, not configuring\n");

        maddr = of_get_property(dn, "local-mac-address", &len);

        if (maddr && len == 6) {
                memcpy(mac->mac_addr, maddr, 6);

        /* Some old versions of firmware mistakenly use mac-address
         * (and as a string) instead of a byte array in local-mac-address.
         */
        maddr = of_get_property(dn, "mac-address", NULL);
                        "no mac address in device tree, not configuring\n");

        if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
                   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
                        "can't parse mac address, not configuring\n");

        memcpy(mac->mac_addr, addr, 6);
static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
        struct pasemi_mac *mac = netdev_priv(dev);
        struct sockaddr *addr = p;
        unsigned int adr0, adr1;

        if (!is_valid_ether_addr(addr->sa_data))

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        adr0 = dev->dev_addr[2] << 24 |
               dev->dev_addr[3] << 16 |
               dev->dev_addr[4] << 8 |
               dev->dev_addr[5];
        adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
        adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];

        pasemi_mac_intf_disable(mac);
        write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
        write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
        pasemi_mac_intf_enable(mac);
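
/* LRO callback: find the IP and TCP headers of a received frame and
 * report whether it is eligible for aggregation (IPv4 + TCP only, with
 * a complete header and a good hardware checksum).
 */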
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
                       void **tcph, u64 *hdr_flags, void *data)
        u64 macrx = (u64) data;

        /* IPv4 header checksum failed */
        if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK)

        skb_reset_network_header(skb);

        if (iph->protocol != IPPROTO_TCP)

        ip_len = ip_hdrlen(skb);
        skb_set_transport_header(skb, ip_len);
        *tcph = tcp_hdr(skb);

        /* check if ip header and tcp header are complete */
        if (iph->tot_len < ip_len + tcp_hdrlen(skb))

        *hdr_flags = LRO_IPV4 | LRO_TCP;
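
/* Unmap the DMA mappings of one transmitted skb (head plus fragments),
 * free the skb, and return the number of descriptor slots it occupied.
 */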
static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
                                   const unsigned int nfrags,
                                   struct sk_buff *skb,
                                   const dma_addr_t *dmas)
        struct pci_dev *pdev = mac->dma_pdev;

        pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);

        for (f = 0; f < nfrags; f++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);

        dev_kfree_skb_irq(skb);

        /* Freed slots: the mactx descriptor, the main skb buffer pointer, and
         * nfrags additional fragment pointers, rounded up to an even count.
         */
        return (nfrags + 3) & ~1;
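
/* Allocate a TX function channel used to offload checksums for jumbo
 * frames, along with the two event flags that order it against the
 * real TX channel.
 */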
static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac)
        struct pasemi_mac_csring *ring;

        ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
                                     offsetof(struct pasemi_mac_csring, chan));
                dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");

        chno = ring->chan.chno;

        ring->size = CS_RING_SIZE;
        ring->next_to_fill = 0;

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))

        write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
                      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
        val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3);

        write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

        ring->events[0] = pasemi_dma_alloc_flag();
        ring->events[1] = pasemi_dma_alloc_flag();
        if (ring->events[0] < 0 || ring->events[1] < 0)

        pasemi_dma_clear_flag(ring->events[0]);
        pasemi_dma_clear_flag(ring->events[1]);

        ring->fun = pasemi_dma_alloc_fun();

        cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP |
              PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
              PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ;

        if (translation_enabled())
                cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

        write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

        pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
                              PAS_DMA_TXCHAN_TCMDSTA_DB |
                              PAS_DMA_TXCHAN_TCMDSTA_DE |
                              PAS_DMA_TXCHAN_TCMDSTA_DA);

        if (ring->events[0] >= 0)
                pasemi_dma_free_flag(ring->events[0]);
        if (ring->events[1] >= 0)
                pasemi_dma_free_flag(ring->events[1]);
        pasemi_dma_free_ring(&ring->chan);
        pasemi_dma_free_chan(&ring->chan);
static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)
        mac->cs[0] = pasemi_mac_setup_csring(mac);
        if (mac->type == MAC_TYPE_XAUI)
                mac->cs[1] = pasemi_mac_setup_csring(mac);

        for (i = 0; i < MAX_CS; i++)

static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring)
        pasemi_dma_stop_chan(&csring->chan);
        pasemi_dma_free_flag(csring->events[0]);
        pasemi_dma_free_flag(csring->events[1]);
        pasemi_dma_free_ring(&csring->chan);
        pasemi_dma_free_chan(&csring->chan);
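
/* Set up one RX channel: driver bookkeeping, the descriptor ring, the
 * buffer ring the RX interface pulls free buffers from, and the
 * corresponding DMA and interface registers.
 */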
static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
        struct pasemi_mac_rxring *ring;
        struct pasemi_mac *mac = netdev_priv(dev);

        ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
                                     offsetof(struct pasemi_mac_rxring, chan));
                dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");

        chno = ring->chan.chno;

        spin_lock_init(&ring->lock);

        ring->size = RX_RING_SIZE;
        ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  RX_RING_SIZE, GFP_KERNEL);

        if (!ring->ring_info)

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))

        ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
                                           RX_RING_SIZE * sizeof(u64),
                                           &ring->buf_dma, GFP_KERNEL);

        memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

        write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
                      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

        write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
                      PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
                      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

        cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

        if (translation_enabled())
                cfg |= PAS_DMA_RXCHAN_CFG_CTR;

        write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);

        write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
                      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

        write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
                      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
                      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

        cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 |
              PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
              PAS_DMA_RXINT_CFG_HEN;

        if (translation_enabled())
                cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

        write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

        ring->next_to_fill = 0;
        ring->next_to_clean = 0;

        kfree(ring->ring_info);
        pasemi_dma_free_chan(&ring->chan);
static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(const struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_txring *ring;

        ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
                                     offsetof(struct pasemi_mac_txring, chan));
                dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");

        chno = ring->chan.chno;

        spin_lock_init(&ring->lock);

        ring->size = TX_RING_SIZE;
        ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  TX_RING_SIZE, GFP_KERNEL);
        if (!ring->ring_info)

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))

        write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
                      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
        val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

        write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

        cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
              PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
              PAS_DMA_TXCHAN_CFG_UP |
              PAS_DMA_TXCHAN_CFG_WT(4);

        if (translation_enabled())
                cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

        write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

        ring->next_to_fill = 0;
        ring->next_to_clean = 0;

        kfree(ring->ring_info);
        pasemi_dma_free_chan(&ring->chan);
static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
        struct pasemi_mac_txring *txring = tx_ring(mac);
        struct pasemi_mac_buffer *info;
        dma_addr_t dmas[MAX_SKB_FRAGS+1];

        start = txring->next_to_clean;
        limit = txring->next_to_fill;

        /* Compensate for when fill has wrapped and clean has not */
        if (start > limit)
                limit += TX_RING_SIZE;

        for (i = start; i < limit; i += freed) {
                info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
                if (info->dma && info->skb) {
                        nfrags = skb_shinfo(info->skb)->nr_frags;
                        for (j = 0; j <= nfrags; j++)
                                dmas[j] = txring->ring_info[(i+1+j) &
                                                            (TX_RING_SIZE-1)].dma;
                        freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
                                                        info->skb, dmas);

        kfree(txring->ring_info);
        pasemi_dma_free_chan(&txring->chan);
static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
        struct pasemi_mac_rxring *rx = rx_ring(mac);
        struct pasemi_mac_buffer *info;

        for (i = 0; i < RX_RING_SIZE; i++) {
                info = &RX_DESC_INFO(rx, i);
                if (info->skb && info->dma) {
                        pci_unmap_single(mac->dma_pdev,
                        dev_kfree_skb_any(info->skb);

        for (i = 0; i < RX_RING_SIZE; i++)

static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
        pasemi_mac_free_rx_buffers(mac);

        dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
                          rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);

        kfree(rx_ring(mac)->ring_info);
        pasemi_dma_free_chan(&rx_ring(mac)->chan);
static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
                                         const int limit)
        const struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_rxring *rx = rx_ring(mac);

        fill = rx_ring(mac)->next_to_fill;
        for (count = 0; count < limit; count++) {
                struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
                u64 *buff = &RX_BUFF(rx, fill);

                skb = dev_alloc_skb(mac->bufsz);
                skb_reserve(skb, LOCAL_SKB_ALIGN);

                dma = pci_map_single(mac->dma_pdev, skb->data,
                                     mac->bufsz - LOCAL_SKB_ALIGN,
                                     PCI_DMA_FROMDEVICE);

                if (unlikely(dma_mapping_error(dma))) {
                        dev_kfree_skb_irq(info->skb);

                *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);

        write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count);

        rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &
                                     (RX_RING_SIZE - 1);
static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac)
        struct pasemi_mac_rxring *rx = rx_ring(mac);
        unsigned int reg, pcnt;
        /* Re-enable packet count interrupts: finally
         * ack the packet count interrupt we got in rx_intr.
         */
        pcnt = *rx->chan.status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

        if (*rx->chan.status & PAS_STATUS_TIMER)
                reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

        write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg);

static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac)
        unsigned int reg, pcnt;

        /* Re-enable packet count interrupts */
        pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

        write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);
static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
                                       const u64 macrx)
        unsigned int rcmdsta, ccmdsta;
        struct pasemi_dmachan *chan = &rx_ring(mac)->chan;

        if (!netif_msg_rx_err(mac))
                return;

        rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
        ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));

        printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
               macrx, *chan->status);

        printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
               rcmdsta, ccmdsta);

static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
                                       const u64 mactx)
        struct pasemi_dmachan *chan = &tx_ring(mac)->chan;

        if (!netif_msg_tx_err(mac))
                return;

        cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));

        printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "
               "tx status 0x%016lx\n", mactx, *chan->status);

        printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
                               const int limit)
        const struct pasemi_dmachan *chan = &rx->chan;
        struct pasemi_mac *mac = rx->mac;
        struct pci_dev *pdev = mac->dma_pdev;
        int count, buf_index, tot_bytes, packets;
        struct pasemi_mac_buffer *info;

        spin_lock(&rx->lock);

        n = rx->next_to_clean;

        prefetch(&RX_DESC(rx, n));

        for (count = 0; count < limit; count++) {
                macrx = RX_DESC(rx, n);
                prefetch(&RX_DESC(rx, n+4));

                if ((macrx & XCT_MACRX_E) ||
                    (*chan->status & PAS_STATUS_ERROR))
                        pasemi_mac_rx_error(mac, macrx);

                if (!(macrx & XCT_MACRX_O))
                        break;

                BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

                eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
                       XCT_RXRES_8B_EVAL_S;

                dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
                info = &RX_DESC_INFO(rx, buf_index);

                len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

                pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,
                                 PCI_DMA_FROMDEVICE);

                if (macrx & XCT_MACRX_CRC) {
                        /* CRC error flagged */
                        mac->netdev->stats.rx_errors++;
                        mac->netdev->stats.rx_crc_errors++;
                        /* No need to free skb, it'll be reused */

                if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
                                    XCT_MACRX_CSUM_S;
                } else
                        skb->ip_summed = CHECKSUM_NONE;

                /* Don't include CRC */

                skb->protocol = eth_type_trans(skb, mac->netdev);
                lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx);

                RX_DESC(rx, n+1) = 0;

                /* Need to zero it out since hardware doesn't, since the
                 * replenish loop uses it to tell when it's done.
                 */
                RX_BUFF(rx, buf_index) = 0;

        if (n > RX_RING_SIZE) {
                /* Errata 5971 workaround: L2 target of headers */
                write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
                n &= (RX_RING_SIZE-1);
        }

        rx_ring(mac)->next_to_clean = n;

        lro_flush_all(&mac->lro_mgr);

        /* Increase is in number of 16-byte entries, and since each descriptor
         * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
         * twice the count.
         */
        write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);

        pasemi_mac_replenish_rx_ring(mac->netdev, count);

        mac->netdev->stats.rx_bytes += tot_bytes;
        mac->netdev->stats.rx_packets += packets;

        spin_unlock(&rx_ring(mac)->lock);

        return count;
/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
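
/* Reclaim completed TX descriptors in batches: collect skbs and their
 * DMA addresses under the ring lock, then unmap and free them after
 * the lock has been dropped.
 */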
static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
        struct pasemi_dmachan *chan = &txring->chan;
        struct pasemi_mac *mac = txring->mac;
        unsigned int start, descr_count, buf_count, batch_limit;
        unsigned int ring_limit;
        unsigned int total_count;
        struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
        dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
        int nf[TX_CLEAN_BATCHSIZE];

        batch_limit = TX_CLEAN_BATCHSIZE;

        spin_lock_irqsave(&txring->lock, flags);

        start = txring->next_to_clean;
        ring_limit = txring->next_to_fill;

        prefetch(&TX_DESC_INFO(txring, start+1).skb);

        /* Compensate for when fill has wrapped but clean has not */
        if (start > ring_limit)
                ring_limit += TX_RING_SIZE;

        for (i = start, descr_count = 0, buf_count = 0;
             descr_count < batch_limit && i < ring_limit;
             i += buf_count) {
                u64 mactx = TX_DESC(txring, i);

                if ((mactx & XCT_MACTX_E) ||
                    (*chan->status & PAS_STATUS_ERROR))
                        pasemi_mac_tx_error(mac, mactx);

                /* Skip over control descriptors */
                if (!(mactx & XCT_MACTX_LLEN_M)) {
                        TX_DESC(txring, i) = 0;
                        TX_DESC(txring, i+1) = 0;

                skb = TX_DESC_INFO(txring, i+1).skb;
                nr_frags = TX_DESC_INFO(txring, i).dma;

                if (unlikely(mactx & XCT_MACTX_O))
                        /* Not yet transmitted */
                        break;

                buf_count = 2 + nr_frags;
                /* Since we always fill with an even number of entries, make
                 * sure we skip any unused one at the end as well.
                 */
                if (buf_count & 1)
                        buf_count++;

                for (j = 0; j <= nr_frags; j++)
                        dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

                skbs[descr_count] = skb;
                nf[descr_count] = nr_frags;

                TX_DESC(txring, i) = 0;
                TX_DESC(txring, i+1) = 0;

        txring->next_to_clean = i & (TX_RING_SIZE-1);

        spin_unlock_irqrestore(&txring->lock, flags);
        netif_wake_queue(mac->netdev);

        for (i = 0; i < descr_count; i++)
                pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);

        total_count += descr_count;

        /* If the batch was full, try to clean more */
        if (descr_count == batch_limit)
static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
        const struct pasemi_mac_rxring *rxring = data;
        struct pasemi_mac *mac = rxring->mac;
        struct net_device *dev = mac->netdev;
        const struct pasemi_dmachan *chan = &rxring->chan;

        if (!(*chan->status & PAS_STATUS_CAUSE_M))
                return IRQ_NONE;

        /* Don't reset packet count so it won't fire again but clear
         * all interrupt sources.
         */
        reg = 0;

        if (*chan->status & PAS_STATUS_SOFT)
                reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
        if (*chan->status & PAS_STATUS_ERROR)
                reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;

        netif_rx_schedule(dev, &mac->napi);

        write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);

        return IRQ_HANDLED;
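
/* TX cleanup is normally driven by a timer rather than by per-packet
 * interrupts; the TX interrupt handler only pushes the timer out and
 * schedules NAPI.
 */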
#define TX_CLEAN_INTERVAL HZ

static void pasemi_mac_tx_timer(unsigned long data)
        struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
        struct pasemi_mac *mac = txring->mac;

        pasemi_mac_clean_tx(txring);

        mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL);

        pasemi_mac_restart_tx_intr(mac);

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
        struct pasemi_mac_txring *txring = data;
        const struct pasemi_dmachan *chan = &txring->chan;
        struct pasemi_mac *mac = txring->mac;

        if (!(*chan->status & PAS_STATUS_CAUSE_M))
                return IRQ_NONE;

        reg = 0;

        if (*chan->status & PAS_STATUS_SOFT)
                reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
        if (*chan->status & PAS_STATUS_ERROR)
                reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

        mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);

        netif_rx_schedule(mac->netdev, &mac->napi);

        write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);

        return IRQ_HANDLED;
static void pasemi_adjust_link(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int new_flags;

        if (!mac->phydev->link) {
                /* If no link, MAC speed settings don't matter. Just report
                 * link down and return.
                 */
                if (mac->link && netif_msg_link(mac))
                        printk(KERN_INFO "%s: Link is down.\n", dev->name);

                netif_carrier_off(dev);
                pasemi_mac_intf_disable(mac);

                return;
        } else {
                pasemi_mac_intf_enable(mac);
                netif_carrier_on(dev);
        }

        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
        new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
                              PAS_MAC_CFG_PCFG_TSR_M);

        if (!mac->phydev->duplex)
                new_flags |= PAS_MAC_CFG_PCFG_HD;

        switch (mac->phydev->speed) {
        case 1000:
                new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
                             PAS_MAC_CFG_PCFG_TSR_1G;
                break;
        case 100:
                new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
                             PAS_MAC_CFG_PCFG_TSR_100M;
                break;
        case 10:
                new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
                             PAS_MAC_CFG_PCFG_TSR_10M;
                break;
        default:
                printk("Unsupported speed %d\n", mac->phydev->speed);
        }

        /* Print on link or speed/duplex change */
        msg = mac->link != mac->phydev->link || flags != new_flags;

        mac->duplex = mac->phydev->duplex;
        mac->speed = mac->phydev->speed;
        mac->link = mac->phydev->link;

        if (new_flags != flags)
                write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

        if (msg && netif_msg_link(mac))
                printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
                       dev->name, mac->speed, mac->duplex ? "full" : "half");
static int pasemi_mac_phy_init(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        struct device_node *dn, *phy_dn;
        struct phy_device *phydev;
        unsigned int phy_id;
        const unsigned int *prop;

        dn = pci_device_to_OF_node(mac->pdev);
        ph = of_get_property(dn, "phy-handle", NULL);
        phy_dn = of_find_node_by_phandle(*ph);

        prop = of_get_property(phy_dn, "reg", NULL);
        ret = of_address_to_resource(phy_dn->parent, 0, &r);

        snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

        of_node_put(phy_dn);

        phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0,
                             PHY_INTERFACE_MODE_SGMII);

        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
                return PTR_ERR(phydev);

        mac->phydev = phydev;

        of_node_put(phy_dn);
static int pasemi_mac_open(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);

        flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
                PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
                PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

        write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

        ret = pasemi_mac_setup_rx_resources(dev);
        if (ret)
                goto out_rx_resources;

        mac->tx = pasemi_mac_setup_tx_resources(dev);

        if (dev->mtu > 1500) {
                pasemi_mac_setup_csrings(mac);

        /* Zero out rmon counters */
        for (i = 0; i < 32; i++)
                write_mac_reg(mac, PAS_MAC_RMON(i), 0);

        /* 0x3ff with 33MHz clock is about 31us */
        write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
                      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));

        write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno),
                      PAS_IOB_DMA_RXCH_CFG_CNTTH(256));

        write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno),
                      PAS_IOB_DMA_TXCH_CFG_CNTTH(32));

        write_mac_reg(mac, PAS_MAC_IPC_CHNL,
                      PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) |
                      PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno));

        write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                      PAS_DMA_RXINT_RCMDSTA_EN |
                      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
                      PAS_DMA_RXINT_RCMDSTA_BP |
                      PAS_DMA_RXINT_RCMDSTA_OO |
                      PAS_DMA_RXINT_RCMDSTA_BT);

        /* enable rx channel */
        pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU |
                              PAS_DMA_RXCHAN_CCMDSTA_OD |
                              PAS_DMA_RXCHAN_CCMDSTA_FD |
                              PAS_DMA_RXCHAN_CCMDSTA_DT);

        /* enable tx channel */
        pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
                              PAS_DMA_TXCHAN_TCMDSTA_DB |
                              PAS_DMA_TXCHAN_TCMDSTA_DE |
                              PAS_DMA_TXCHAN_TCMDSTA_DA);

        pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

        write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno),

        /* Clear out any residual packet count state from firmware */
        pasemi_mac_restart_rx_intr(mac);
        pasemi_mac_restart_tx_intr(mac);

        flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

        if (mac->type == MAC_TYPE_GMAC)
                flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
        else
                flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

        /* Enable interface in MAC */
        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

        ret = pasemi_mac_phy_init(dev);
        /* Since we won't get link notification, just enable RX */
        pasemi_mac_intf_enable(mac);
        if (mac->type == MAC_TYPE_GMAC) {
                /* Warn for missing PHY on SGMII (1Gig) ports */
                dev_warn(&mac->pdev->dev,
                         "PHY init failed: %d.\n", ret);
                dev_warn(&mac->pdev->dev,
                         "Defaulting to 1Gbit full duplex\n");

        netif_start_queue(dev);
        napi_enable(&mac->napi);

        snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
                 dev->name);

        ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
                          mac->tx_irq_name, mac->tx);
                dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
                        mac->tx->chan.irq, ret);

        snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
                 dev->name);

        ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
                          mac->rx_irq_name, mac->rx);
                dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
                        mac->rx->chan.irq, ret);

        if (mac->phydev)
                phy_start(mac->phydev);

        init_timer(&mac->tx->clean_timer);
        mac->tx->clean_timer.function = pasemi_mac_tx_timer;
        mac->tx->clean_timer.data = (unsigned long)mac->tx;
        mac->tx->clean_timer.expires = jiffies+HZ;
        add_timer(&mac->tx->clean_timer);

        return 0;

        free_irq(mac->tx->chan.irq, mac->tx);

        napi_disable(&mac->napi);
        netif_stop_queue(dev);

        pasemi_mac_free_tx_resources(mac);
        pasemi_mac_free_rx_resources(mac);

#define MAX_RETRIES 5000
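
/* Quiesce helpers: request a stop and poll the command/status register
 * until the channel (or RX interface) deasserts ACT, up to MAX_RETRIES
 * times, complaining if it never goes idle.
 */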
static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
        unsigned int sta, retries;
        int txch = tx_ring(mac)->chan.chno;

        write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
                      PAS_DMA_TXCHAN_TCMDSTA_ST);

        for (retries = 0; retries < MAX_RETRIES; retries++) {
                sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
                if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
                        break;

        if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev,
                        "Failed to stop tx channel, tcmdsta %08x\n", sta);

        write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);

static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
        unsigned int sta, retries;
        int rxch = rx_ring(mac)->chan.chno;

        write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
                      PAS_DMA_RXCHAN_CCMDSTA_ST);
        for (retries = 0; retries < MAX_RETRIES; retries++) {
                sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
                if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
                        break;

        if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev,
                        "Failed to stop rx channel, ccmdsta %08x\n", sta);
        write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);

static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
        unsigned int sta, retries;

        write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                      PAS_DMA_RXINT_RCMDSTA_ST);
        for (retries = 0; retries < MAX_RETRIES; retries++) {
                sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
                if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
                        break;

        if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev,
                        "Failed to stop rx interface, rcmdsta %08x\n", sta);
        write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
static int pasemi_mac_close(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);

        rxch = rx_ring(mac)->chan.chno;
        txch = tx_ring(mac)->chan.chno;

        if (mac->phydev) {
                phy_stop(mac->phydev);
                phy_disconnect(mac->phydev);
        }

        del_timer_sync(&mac->tx->clean_timer);

        netif_stop_queue(dev);
        napi_disable(&mac->napi);

        sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
        if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
                   PAS_DMA_RXINT_RCMDSTA_OO |
                   PAS_DMA_RXINT_RCMDSTA_BT))
                printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

        sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
        if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
                   PAS_DMA_RXCHAN_CCMDSTA_OD |
                   PAS_DMA_RXCHAN_CCMDSTA_FD |
                   PAS_DMA_RXCHAN_CCMDSTA_DT))
                printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

        sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
        if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
                   PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
                printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

        /* Clean out any pending buffers */
        pasemi_mac_clean_tx(tx_ring(mac));
        pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

        pasemi_mac_pause_txchan(mac);
        pasemi_mac_pause_rxint(mac);
        pasemi_mac_pause_rxchan(mac);
        pasemi_mac_intf_disable(mac);

        free_irq(mac->tx->chan.irq, mac->tx);
        free_irq(mac->rx->chan.irq, mac->rx);

        for (i = 0; i < mac->num_cs; i++)
                pasemi_mac_free_csring(mac->cs[i]);

        /* Free resources */
        pasemi_mac_free_rx_resources(mac);
        pasemi_mac_free_tx_resources(mac);

        return 0;
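
/* For frames too large for the MAC's inline checksum support, build a
 * descriptor sequence on a separate function channel that computes the
 * TCP/UDP checksum and copies the result back into the packet, using
 * event flags so the TX channel waits for the checksum before sending.
 */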
static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
                                    const dma_addr_t *map,
                                    const unsigned int *map_size,
                                    struct pasemi_mac_txring *txring,
                                    struct pasemi_mac_csring *csring)
        const int nh_off = skb_network_offset(skb);
        const int nh_len = skb_network_header_len(skb);
        const int nfrags = skb_shinfo(skb)->nr_frags;
        int cs_size, i, fill, hdr, cpyhdr, evt;

        fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
               XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
               XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
               XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE;

        switch (ip_hdr(skb)->protocol) {
        case IPPROTO_TCP:
                fund |= XCT_FUN_SIG_TCP4;
                /* TCP checksum is 16 bytes into the header */
                cs_dest = map[0] + skb_transport_offset(skb) + 16;
                break;
        case IPPROTO_UDP:
                fund |= XCT_FUN_SIG_UDP4;
                /* UDP checksum is 6 bytes into the header */
                cs_dest = map[0] + skb_transport_offset(skb) + 6;
                break;
        }

        /* Do the checksum offloaded */
        fill = csring->next_to_fill;

        CS_DESC(csring, fill++) = fund;
        /* Room for 8BRES. Checksum result is really 2 bytes into it */
        csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2;
        CS_DESC(csring, fill++) = 0;

        CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off);
        for (i = 1; i <= nfrags; i++)
                CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);

        /* Copy the result into the TCP packet */
        CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
                                  XCT_FUN_LLEN(2) | XCT_FUN_SE;
        CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
        CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma);

        evt = !csring->last_event;
        csring->last_event = evt;

        /* Event handshaking with MAC TX */
        CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                                  CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]);
        CS_DESC(csring, fill++) = 0;
        CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                                  CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]);
        CS_DESC(csring, fill++) = 0;
        csring->next_to_fill = fill & (CS_RING_SIZE-1);

        cs_size = fill - hdr;
        write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1);

        /* TX-side event handshaking */
        fill = txring->next_to_fill;
        TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                                  CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]);
        TX_DESC(txring, fill++) = 0;
        TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
                                  CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]);
        TX_DESC(txring, fill++) = 0;
        txring->next_to_fill = fill;

        write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
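
/* Main transmit path: map the skb, select either inline checksum flags
 * or the separate checksum channel depending on frame size, and post
 * the descriptors to the TX DMA channel.
 */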
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
        struct pasemi_mac * const mac = netdev_priv(dev);
        struct pasemi_mac_txring * const txring = tx_ring(mac);
        struct pasemi_mac_csring *csring;
        dma_addr_t map[MAX_SKB_FRAGS+1];
        unsigned int map_size[MAX_SKB_FRAGS+1];
        unsigned long flags;
        const int nh_off = skb_network_offset(skb);
        const int nh_len = skb_network_header_len(skb);

        prefetch(&txring->ring_info);

        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

        nfrags = skb_shinfo(skb)->nr_frags;

        map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
                                PCI_DMA_TODEVICE);
        map_size[0] = skb_headlen(skb);
        if (dma_mapping_error(map[0]))
                goto out_err_nolock;

        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
                                        frag->page_offset, frag->size,
                                        PCI_DMA_TODEVICE);
                map_size[i+1] = frag->size;
                if (dma_mapping_error(map[i+1])) {
                        goto out_err_nolock;

        if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_TCP:
                        dflags |= XCT_MACTX_CSUM_TCP;
                        dflags |= XCT_MACTX_IPH(nh_len >> 2);
                        dflags |= XCT_MACTX_IPO(nh_off);
                        break;
                case IPPROTO_UDP:
                        dflags |= XCT_MACTX_CSUM_UDP;
                        dflags |= XCT_MACTX_IPH(nh_len >> 2);
                        dflags |= XCT_MACTX_IPO(nh_off);
                        break;
                }
        }

        mactx = dflags | XCT_MACTX_LLEN(skb->len);

        spin_lock_irqsave(&txring->lock, flags);

        /* Avoid stepping on the same cache line that the DMA controller
         * is currently about to send, so leave at least 8 words available.
         * Total free space needed is mactx + fragments + 8
         */
        if (RING_AVAIL(txring) < nfrags + 14) {
                /* no room -- stop the queue and wait for tx intr */
                netif_stop_queue(dev);

        /* Queue up checksum + event descriptors, if needed */
        if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
                csring = mac->cs[mac->last_cs];
                mac->last_cs = (mac->last_cs + 1) % mac->num_cs;

                pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);

        fill = txring->next_to_fill;
        TX_DESC(txring, fill) = mactx;
        TX_DESC_INFO(txring, fill).dma = nfrags;
        fill++;
        TX_DESC_INFO(txring, fill).skb = skb;
        for (i = 0; i <= nfrags; i++) {
                TX_DESC(txring, fill+i) =
                        XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
                TX_DESC_INFO(txring, fill+i).dma = map[i];
        }

        /* We have to add an even number of 8-byte entries to the ring
         * even if the last one is unused. That means always an odd number
         * of pointers + one mactx descriptor.
         */
        if (nfrags & 1)
                nfrags++;

        txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;

        spin_unlock_irqrestore(&txring->lock, flags);

        write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);

        return NETDEV_TX_OK;

out_err:
        spin_unlock_irqrestore(&txring->lock, flags);
out_err_nolock:
        pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
                         PCI_DMA_TODEVICE);

        return NETDEV_TX_BUSY;
static void pasemi_mac_set_rx_mode(struct net_device *dev)
        const struct pasemi_mac *mac = netdev_priv(dev);

        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

        /* Set promiscuous */
        if (dev->flags & IFF_PROMISC)
                flags |= PAS_MAC_CFG_PCFG_PR;
        else
                flags &= ~PAS_MAC_CFG_PCFG_PR;

        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
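
/* NAPI poll: clean the TX ring, receive up to 'budget' packets, and
 * re-enable interrupts once the RX ring has been drained.
 */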
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
        struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
        struct net_device *dev = mac->netdev;

        pasemi_mac_clean_tx(tx_ring(mac));
        pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
        if (pkts < budget) {
                /* all done, no more packets present */
                netif_rx_complete(dev, napi);

                pasemi_mac_restart_rx_intr(mac);
                pasemi_mac_restart_tx_intr(mac);
        }
        return pkts;
static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int rcmdsta = 0;

        if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
                return -EINVAL;

        running = netif_running(dev);

        if (running) {
                /* Need to stop the interface, clean out all already
                 * received buffers, free all unused buffers on the RX
                 * interface ring, then finally re-fill the rx ring with
                 * the new-size buffers and restart.
                 */
                napi_disable(&mac->napi);
                netif_tx_disable(dev);
                pasemi_mac_intf_disable(mac);

                rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
                pasemi_mac_pause_rxint(mac);
                pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
                pasemi_mac_free_rx_buffers(mac);
        }

        /* Setup checksum channels if large MTU and none already allocated */
        if (new_mtu > 1500 && !mac->num_cs) {
                pasemi_mac_setup_csrings(mac);

        /* Change maxf, i.e. what size frames are accepted.
         * Need room for ethernet header and CRC word
         */
        reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
        reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
        reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
        write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);

        dev->mtu = new_mtu;
        /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
        mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

        if (running) {
                write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                              rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);

                rx_ring(mac)->next_to_fill = 0;
                pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);

                napi_enable(&mac->napi);
                netif_start_queue(dev);
                pasemi_mac_intf_enable(mac);
        }
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct net_device *dev;
        struct pasemi_mac *mac;
        DECLARE_MAC_BUF(mac_buf);

        err = pci_enable_device(pdev);
        if (err)
                return err;

        dev = alloc_etherdev(sizeof(struct pasemi_mac));
        if (!dev) {
                dev_err(&pdev->dev,
                        "pasemi_mac: Could not allocate ethernet device.\n");
                err = -ENOMEM;
                goto out_disable_device;
        }

        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        mac = netdev_priv(dev);

        mac->pdev = pdev;
        mac->netdev = dev;

        netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

        dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
                        NETIF_F_HIGHDMA | NETIF_F_GSO;

        mac->lro_mgr.max_aggr = LRO_MAX_AGGR;
        mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
        mac->lro_mgr.lro_arr = mac->lro_desc;
        mac->lro_mgr.get_skb_header = get_skb_hdr;
        mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
        mac->lro_mgr.dev = mac->netdev;
        mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
        mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

        mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
        if (!mac->dma_pdev) {
                dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");

        mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
        if (!mac->iob_pdev) {
                dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");

        /* get mac addr from device tree */
        if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {

        memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

        mac->dma_if = mac_to_intf(mac);
        if (mac->dma_if < 0) {
                dev_err(&mac->pdev->dev, "Can't map DMA interface\n");

        switch (pdev->device) {
        case 0xa005:
                mac->type = MAC_TYPE_GMAC;
                break;
        case 0xa006:
                mac->type = MAC_TYPE_XAUI;
                break;
        }

        dev->open = pasemi_mac_open;
        dev->stop = pasemi_mac_close;
        dev->hard_start_xmit = pasemi_mac_start_tx;
        dev->set_multicast_list = pasemi_mac_set_rx_mode;
        dev->set_mac_address = pasemi_mac_set_mac_addr;
        dev->mtu = PE_DEF_MTU;
        /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
        mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

        dev->change_mtu = pasemi_mac_change_mtu;
        dev->ethtool_ops = &pasemi_mac_ethtool_ops;

        mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        /* Enable most messages by default */
        mac->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        err = register_netdev(dev);

        if (err) {
                dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
                        err);
                goto out;
        } else if (netif_msg_probe(mac))
                printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n",
                       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
                       mac->dma_if, print_mac(mac_buf, dev->dev_addr));

        return err;

out:
        pci_dev_put(mac->iob_pdev);
        pci_dev_put(mac->dma_pdev);
out_disable_device:
        pci_disable_device(pdev);
        return err;
static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct pasemi_mac *mac;

        if (!netdev)
                return;

        mac = netdev_priv(netdev);

        unregister_netdev(netdev);

        pci_disable_device(pdev);
        pci_dev_put(mac->dma_pdev);
        pci_dev_put(mac->iob_pdev);

        pasemi_dma_free_chan(&mac->tx->chan);
        pasemi_dma_free_chan(&mac->rx->chan);

        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
static struct pci_device_id pasemi_mac_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
        { },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
        .name           = "pasemi_mac",
        .id_table       = pasemi_mac_pci_tbl,
        .probe          = pasemi_mac_probe,
        .remove         = __devexit_p(pasemi_mac_remove),
};

static void __exit pasemi_mac_cleanup_module(void)
        pci_unregister_driver(&pasemi_mac_driver);

int pasemi_mac_init_module(void)
        err = pasemi_dma_init();
        if (err)
                return err;

        return pci_register_driver(&pasemi_mac_driver);

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);