3 Broadcom BCM43xx wireless driver
5 DMA ringbuffer and descriptor allocation/management
7 Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
9 Some code in this file is derived from the b44.c driver
10 Copyright (C) 2002 David S. Miller
11 Copyright (C) Pekka Pietikainen
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; see the file COPYING. If not, write to
25 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
26 Boston, MA 02110-1301, USA.
31 #include "bcm43xx_dma.h"
32 #include "bcm43xx_main.h"
33 #include "bcm43xx_debugfs.h"
34 #include "bcm43xx_power.h"
36 #include <linux/dmapool.h>
37 #include <linux/pci.h>
38 #include <linux/delay.h>
39 #include <linux/skbuff.h>
40 #include <asm/semaphore.h>
/* Return the number of descriptor slots on this ring that are not in use. */
43 static inline int free_slots(struct bcm43xx_dmaring *ring)
45 return (ring->nr_slots - ring->used_slots);
/* Advance a slot index by one, wrapping from the last slot back to 0.
 * Accepts -1 as input (the "no current slot yet" marker used by
 * ring->current_slot). */
48 static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
50 assert(slot >= -1 && slot <= ring->nr_slots - 1);
51 if (slot == ring->nr_slots - 1)
/* Step a slot index back by one, wrapping from slot 0 to the last slot. */
56 static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
58 assert(slot >= 0 && slot <= ring->nr_slots - 1);
60 return ring->nr_slots - 1;
64 /* Request a slot for usage.
 * Advances ring->current_slot to the next free slot and returns it.
 * Must not be called on a suspended ring or when no slots are free
 * (both are asserted below).
 * NOTE(review): locking requirements are not visible in this listing --
 * presumably the caller holds the ring lock; confirm against callers. */
66 int request_slot(struct bcm43xx_dmaring *ring)
71 assert(!ring->suspended);
72 assert(free_slots(ring) != 0);
74 slot = next_slot(ring, ring->current_slot);
75 ring->current_slot = slot;
78 /* Check the number of available slots and suspend TX,
79 * if we are running low on free slots.
81 if (unlikely(free_slots(ring) < ring->suspend_mark)) {
/* Stop the kernel network queue so no further TX packets arrive
 * while we are low on descriptor slots. */
82 netif_stop_queue(ring->bcm->net_dev);
/* Debug-only high-watermark of simultaneously used slots. */
85 #ifdef CONFIG_BCM43XX_DEBUG
86 if (ring->used_slots > ring->max_used_slots)
87 ring->max_used_slots = ring->used_slots;
88 #endif /* CONFIG_BCM43XX_DEBUG*/
93 /* Return a slot to the free slots.
 * If the queue was suspended by request_slot() and enough slots are
 * free again (>= resume_mark), wake the network queue. */
95 void return_slot(struct bcm43xx_dmaring *ring, int slot)
101 /* Check if TX is suspended and check if we have
102 * enough free slots to resume it again.
104 if (unlikely(ring->suspended)) {
105 if (free_slots(ring) >= ring->resume_mark) {
107 netif_wake_queue(ring->bcm->net_dev);
/* DMA-map a descriptor buffer and return the bus address.
 * Two dma_map_single() call sites are visible -- presumably one per
 * direction (TX: DMA_TO_DEVICE, RX: DMA_FROM_DEVICE), selected by the
 * elided "tx" argument; confirm against the full source. */
113 dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
121 dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
125 dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
/* Undo a map_descbuffer() mapping. Mirrors the two-direction structure
 * of map_descbuffer() (two dma_unmap_single() call sites). */
134 void unmap_descbuffer(struct bcm43xx_dmaring *ring,
140 dma_unmap_single(&ring->bcm->pci_dev->dev,
144 dma_unmap_single(&ring->bcm->pci_dev->dev,
/* Give CPU ownership of an RX descriptor buffer so its contents can be
 * read after the device wrote into it (DMA_FROM_DEVICE only). */
151 void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
157 dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
158 addr, len, DMA_FROM_DEVICE);
/* Hand an RX descriptor buffer back to the device after the CPU is done
 * with it (counterpart of sync_descbuffer_for_cpu()). */
162 void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
168 dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
169 addr, len, DMA_FROM_DEVICE);
/* Record in the skb control block whether this driver owns the skb and
 * must free it (RX buffers: 1) or whether it is freed elsewhere as part
 * of the txb (TX fragments: 0). Read back via skb_mustfree(). */
173 void mark_skb_mustfree(struct sk_buff *skb,
176 skb->cb[0] = mustfree;
/* Return nonzero if mark_skb_mustfree() flagged this skb for freeing
 * by the driver. */
180 int skb_mustfree(struct sk_buff *skb)
182 return (skb->cb[0] != 0);
185 /* Unmap and free a descriptor buffer.
 * Frees the skb only when skb_mustfree() says the driver owns it;
 * otherwise the ieee80211 txb (which owns the fragment skbs) is freed.
 * The elided argument presumably selects the IRQ-safe free variant
 * (dev_kfree_skb_irq vs. dev_kfree_skb) -- confirm against callers. */
187 void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
188 struct bcm43xx_dmadesc *desc,
189 struct bcm43xx_dmadesc_meta *meta,
193 if (skb_mustfree(meta->skb)) {
195 dev_kfree_skb_irq(meta->skb);
197 dev_kfree_skb(meta->skb);
201 ieee80211_txb_free(meta->txb);
/* Allocate the coherent DMA memory that holds the descriptor ring itself.
 * The hardware can only address the low 1GB (BCM43xx_DMA_BUSADDRMAX), so
 * an allocation above that limit is a fatal error and is released again.
 * The ring base must be 1K aligned (asserted below). */
206 static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
208 struct device *dev = &(ring->bcm->pci_dev->dev);
210 ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
211 &(ring->dmabase), GFP_KERNEL);
213 printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
216 if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
217 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
218 "(0x%08x, len: %lu)\n",
219 ring->dmabase, BCM43xx_DMA_RINGMEMSIZE);
220 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
221 ring->vbase, ring->dmabase);
/* Hardware requires 1K (0x400) alignment of the ring base address. */
224 assert(!(ring->dmabase & 0x000003FF));
225 memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);
/* Release the coherent descriptor-ring memory allocated by
 * alloc_ringmemory(). */
230 static void free_ringmemory(struct bcm43xx_dmaring *ring)
232 struct device *dev = &(ring->bcm->pci_dev->dev);
234 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
235 ring->vbase, ring->dmabase);
238 /* Reset the RX DMA channel
 * Writes the RX control register, then busy-polls (up to 1000 iterations)
 * for the controller to report the DISABLED state. Times out with an
 * error message if the state is never reached. */
239 int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
246 mmio_base + BCM43xx_DMA_RX_CONTROL,
248 for (i = 0; i < 1000; i++) {
249 value = bcm43xx_read32(bcm,
250 mmio_base + BCM43xx_DMA_RX_STATUS);
251 value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
252 if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
259 printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
266 /* Reset the TX DMA channel
 * First waits for the engine to leave any transient state (DISABLED,
 * IDLEWAIT or STOPPED all count as settled), then writes the TX control
 * register and polls again until the DISABLED state is reached.
 * Times out with an error message after 1000 iterations per phase. */
267 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
273 for (i = 0; i < 1000; i++) {
274 value = bcm43xx_read32(bcm,
275 mmio_base + BCM43xx_DMA_TX_STATUS);
276 value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
277 if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
278 value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
279 value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
284 mmio_base + BCM43xx_DMA_TX_CONTROL,
286 for (i = 0; i < 1000; i++) {
287 value = bcm43xx_read32(bcm,
288 mmio_base + BCM43xx_DMA_TX_STATUS);
289 value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
290 if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
297 printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
300 /* ensure the reset is completed. */
/* Allocate and DMA-map a fresh skb for one RX descriptor slot and program
 * the descriptor's address/control words. The slot index is derived from
 * the descriptor's position in the ring. Buffers above the 1GB bus-address
 * limit are rejected as a fatal error (mapping undone, skb freed).
 * The rxhdr frame_length is zeroed so the RX path can later poll it to
 * detect when the device has written a frame. */
306 static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
307 struct bcm43xx_dmadesc *desc,
308 struct bcm43xx_dmadesc_meta *meta,
311 struct bcm43xx_rxhdr *rxhdr;
/* Pointer arithmetic: descriptor index within the ring array. */
315 const int slot = (int)(desc - ring->vbase);
318 assert(slot >= 0 && slot < ring->nr_slots);
321 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
324 dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
325 if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
326 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
327 dev_kfree_skb_any(skb);
328 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
329 "(0x%08x, len: %u)\n",
330 dmaaddr, ring->rx_buffersize);
334 meta->dmaaddr = dmaaddr;
335 skb->dev = ring->bcm->net_dev;
/* RX skbs are owned by the driver and must be freed by it. */
336 mark_skb_mustfree(skb, 1);
/* memoffset translates CPU bus addresses into device DMA addresses. */
337 desc_addr = (u32)(dmaaddr + ring->memoffset);
338 desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
339 (u32)(ring->rx_buffersize - ring->frameoffset));
/* Mark the last descriptor so the hardware wraps the table. */
340 if (slot == ring->nr_slots - 1)
341 desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
342 set_desc_addr(desc, desc_addr);
343 set_desc_ctl(desc, desc_ctl);
/* Clear frame_length; dma_rx() polls this field for completion. */
345 rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
346 rxhdr->frame_length = 0;
352 /* Allocate the initial descbuffers.
353 * This is used for an RX ring only.
 * On failure, unwinds all previously set up slots (unmap + free skb)
 * and returns the error from setup_rx_descbuffer(). On success all
 * slots are populated, so used_slots is set to nr_slots. */
355 static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
357 int i, err = -ENOMEM;
358 struct bcm43xx_dmadesc *desc;
359 struct bcm43xx_dmadesc_meta *meta;
361 for (i = 0; i < ring->nr_slots; i++) {
362 desc = ring->vbase + i;
363 meta = ring->meta + i;
365 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
369 ring->used_slots = ring->nr_slots;
/* Error path: tear down the slots set up so far, in reverse order. */
375 for (i--; i >= 0; i--) {
376 desc = ring->vbase + i;
377 meta = ring->meta + i;
379 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
380 dev_kfree_skb(meta->skb);
385 /* Do initial setup of the DMA controller.
386 * Reset the controller, write the ring busaddress
387 * and switch the "enable" bit on.
 * TX rings: enable transmit and program the descriptor ring address.
 * RX rings: allocate all initial buffers first, then program frame
 * offset, enable, ring address and the initial descriptor index. */
389 static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
395 /* Set Transmit Control register to "transmit enable" */
396 bcm43xx_write32(ring->bcm,
397 ring->mmio_base + BCM43xx_DMA_TX_CONTROL,
398 BCM43xx_DMA_TXCTRL_ENABLE);
399 /* Set Transmit Descriptor ring address. */
400 bcm43xx_write32(ring->bcm,
401 ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
402 ring->dmabase + ring->memoffset);
404 err = alloc_initial_descbuffers(ring);
407 /* Set Receive Control "receive enable" and frame offset */
408 value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
409 value |= BCM43xx_DMA_RXCTRL_ENABLE;
410 bcm43xx_write32(ring->bcm,
411 ring->mmio_base + BCM43xx_DMA_RX_CONTROL,
413 /* Set Receive Descriptor ring address. */
414 bcm43xx_write32(ring->bcm,
415 ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
416 ring->dmabase + ring->memoffset);
417 /* Init the descriptor pointer. */
418 bcm43xx_write32(ring->bcm,
419 ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
427 /* Shutdown the DMA controller.
 * Resets the TX (or RX) engine for this ring and clears the descriptor
 * ring address register so the hardware no longer points at our memory. */
428 static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
431 bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
432 /* Zero out Transmit Descriptor ring address. */
433 bcm43xx_write32(ring->bcm,
434 ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
437 bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
438 /* Zero out Receive Descriptor ring address. */
439 bcm43xx_write32(ring->bcm,
440 ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
/* Unmap and free every descriptor buffer still attached to the ring.
 * No-op if the ring has no used slots. Two unmap call sites are visible,
 * presumably TX vs. RX buffers (different lengths/directions) -- confirm
 * against the full source. */
445 static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
447 struct bcm43xx_dmadesc *desc;
448 struct bcm43xx_dmadesc_meta *meta;
451 if (!ring->used_slots)
453 for (i = 0; i < ring->nr_slots; i++) {
454 desc = ring->vbase + i;
455 meta = ring->meta + i;
463 unmap_descbuffer(ring, meta->dmaaddr,
466 unmap_descbuffer(ring, meta->dmaaddr,
467 ring->rx_buffersize, 0);
469 free_descriptor_buffer(ring, desc, meta, 0);
473 /* Main initialization function.
 * Allocates and initializes one DMA ring: the ring struct, the per-slot
 * metadata array, suspend/resume watermarks (percent of nr_slots), the
 * RX buffer size / frame offset for the given controller base, the ring
 * memory, and finally the hardware controller itself.
 * Returns the ring, or NULL on failure (error labels unwind the
 * allocations in reverse order). */
475 struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
476 u16 dma_controller_base,
477 int nr_descriptor_slots,
480 struct bcm43xx_dmaring *ring;
483 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
487 ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
/* Device DMA addresses are offset from CPU bus addresses by this much. */
492 ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
493 #ifdef CONFIG_BCM947XX
494 if (bcm->pci_dev->bus->number == 0)
499 ring->nr_slots = nr_descriptor_slots;
500 ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
501 ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
502 assert(ring->suspend_mark < ring->resume_mark);
503 ring->mmio_base = dma_controller_base;
/* -1 means "no slot used yet"; see next_slot(). */
506 ring->current_slot = -1;
/* RX buffer geometry depends on which DMA controller this ring uses. */
508 switch (dma_controller_base) {
509 case BCM43xx_MMIO_DMA1_BASE:
510 ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
511 ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
513 case BCM43xx_MMIO_DMA4_BASE:
514 ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
515 ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
522 err = alloc_ringmemory(ring);
525 err = dmacontroller_setup(ring);
527 goto err_free_ringmemory;
533 free_ringmemory(ring);
542 /* Main cleanup function.
 * Logs the debug slot high-watermark, then tears down the controller,
 * all descriptor buffers and the ring memory. */
543 static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
548 dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
550 (ring->tx) ? "TX" : "RX",
551 ring->max_used_slots, ring->nr_slots);
552 /* Device IRQs are disabled prior entering this function,
553 * so no need to take care of concurrency with rx handler stuff.
555 dmacontroller_cleanup(ring);
556 free_all_descbuffers(ring);
557 free_ringmemory(ring);
/* Destroy all DMA rings of the current core, in reverse order of the
 * creation sequence in bcm43xx_dma_init() (RX rings first, then TX
 * rings 3..0). Pointers are NULLed after each destroy so a partial
 * teardown cannot double-free. */
563 void bcm43xx_dma_free(struct bcm43xx_private *bcm)
565 struct bcm43xx_dma *dma = bcm->current_core->dma;
567 bcm43xx_destroy_dmaring(dma->rx_ring1);
568 dma->rx_ring1 = NULL;
569 bcm43xx_destroy_dmaring(dma->rx_ring0);
570 dma->rx_ring0 = NULL;
571 bcm43xx_destroy_dmaring(dma->tx_ring3);
572 dma->tx_ring3 = NULL;
573 bcm43xx_destroy_dmaring(dma->tx_ring2);
574 dma->tx_ring2 = NULL;
575 bcm43xx_destroy_dmaring(dma->tx_ring1);
576 dma->tx_ring1 = NULL;
577 bcm43xx_destroy_dmaring(dma->tx_ring0);
578 dma->tx_ring0 = NULL;
/* Set up all DMA rings for the current core: four TX rings (DMA1-DMA4)
 * plus one RX ring on DMA1, and -- on core revisions < 5 only -- a second
 * RX ring on DMA4. On any failure, previously created rings are torn
 * down via the goto-cleanup ladder at the bottom, in reverse order. */
581 int bcm43xx_dma_init(struct bcm43xx_private *bcm)
583 struct bcm43xx_dma *dma = bcm->current_core->dma;
584 struct bcm43xx_dmaring *ring;
587 /* setup TX DMA channels. */
588 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
589 BCM43xx_TXRING_SLOTS, 1);
592 dma->tx_ring0 = ring;
594 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
595 BCM43xx_TXRING_SLOTS, 1);
597 goto err_destroy_tx0;
598 dma->tx_ring1 = ring;
600 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
601 BCM43xx_TXRING_SLOTS, 1);
603 goto err_destroy_tx1;
604 dma->tx_ring2 = ring;
606 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
607 BCM43xx_TXRING_SLOTS, 1);
609 goto err_destroy_tx2;
610 dma->tx_ring3 = ring;
612 /* setup RX DMA channels. */
613 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
614 BCM43xx_RXRING_SLOTS, 0);
616 goto err_destroy_tx3;
617 dma->rx_ring0 = ring;
/* Older cores (rev < 5) deliver TX status through a second RX ring
 * on the DMA4 controller; newer cores do not need it. */
619 if (bcm->current_core->rev < 5) {
620 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
621 BCM43xx_RXRING_SLOTS, 0);
623 goto err_destroy_rx0;
624 dma->rx_ring1 = ring;
627 dprintk(KERN_INFO PFX "DMA initialized\n");
633 bcm43xx_destroy_dmaring(dma->rx_ring0);
634 dma->rx_ring0 = NULL;
636 bcm43xx_destroy_dmaring(dma->tx_ring3);
637 dma->tx_ring3 = NULL;
639 bcm43xx_destroy_dmaring(dma->tx_ring2);
640 dma->tx_ring2 = NULL;
642 bcm43xx_destroy_dmaring(dma->tx_ring1);
643 dma->tx_ring1 = NULL;
645 bcm43xx_destroy_dmaring(dma->tx_ring0);
646 dma->tx_ring0 = NULL;
650 /* Generate a cookie for the TX header.
 * The cookie identifies a (ring, slot) pair across the hardware round
 * trip, so the xmit-status IRQ can find the descriptor again.
 * Inverse operation: parse_cookie(). */
651 static u16 generate_cookie(struct bcm43xx_dmaring *ring,
656 /* Use the upper 4 bits of the cookie as
657 * DMA controller ID and store the slot number
658 * in the lower 12 bits
660 switch (ring->mmio_base) {
663 case BCM43xx_MMIO_DMA1_BASE:
665 case BCM43xx_MMIO_DMA2_BASE:
668 case BCM43xx_MMIO_DMA3_BASE:
671 case BCM43xx_MMIO_DMA4_BASE:
/* Slot must fit in 12 bits, or it would collide with the ring ID. */
675 assert(((u16)slot & 0xF000) == 0x0000);
681 /* Inspect a cookie and find out to which controller/slot it belongs.
 * Decodes the ring from the upper 4 bits and the slot (written through
 * *slot) from the lower 12 bits. Inverse of generate_cookie(). */
683 struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
684 u16 cookie, int *slot)
686 struct bcm43xx_dma *dma = bcm->current_core->dma;
687 struct bcm43xx_dmaring *ring = NULL;
689 switch (cookie & 0xF000) {
691 ring = dma->tx_ring0;
694 ring = dma->tx_ring1;
697 ring = dma->tx_ring2;
700 ring = dma->tx_ring3;
705 *slot = (cookie & 0x0FFF);
706 assert(*slot >= 0 && *slot < ring->nr_slots);
/* Kick the TX engine: write the byte offset of the descriptor AFTER the
 * last slot of the new frame into the TX descriptor index register, which
 * tells the hardware how far it may process the ring. */
711 static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
714 /* Everything is ready to start. Buffers are DMA mapped and
715 * associated with slots.
716 * "slot" is the last slot of the new frame we want to transmit.
717 * Close your seat belts now, please.
720 slot = next_slot(ring, slot);
/* The register takes a byte offset into the ring, not a slot index. */
721 bcm43xx_write32(ring->bcm,
722 ring->mmio_base + BCM43xx_DMA_TX_DESC_INDEX,
723 (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
/* Queue one fragment skb of a txb for transmission:
 * request a slot, prepend the device TX header (incl. PLCP), DMA-map the
 * frame, program the descriptor (FRAMESTART|FRAMEEND|COMPIRQ, byte count,
 * table-end on the last slot) and poke the controller.
 * Mappings above the 1GB bus-address limit are a fatal error; the slot
 * is returned in that case.
 * NOTE(review): only single-buffer skbs are supported (nr_frags == 0
 * asserted below). */
726 static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
728 struct ieee80211_txb *txb,
732 struct bcm43xx_dmadesc *desc;
733 struct bcm43xx_dmadesc_meta *meta;
737 assert(skb_shinfo(skb)->nr_frags == 0);
739 slot = request_slot(ring);
740 desc = ring->vbase + slot;
741 meta = ring->meta + slot;
744 /* Save the txb pointer for freeing in xmitstatus IRQ */
748 /* Add a device specific TX header. */
749 assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
750 /* Reserve enough headroom for the device tx header. */
751 __skb_push(skb, sizeof(struct bcm43xx_txhdr));
752 /* Now calculate and add the tx header.
753 * The tx header includes the PLCP header.
 * The cookie lets the xmit-status IRQ map the status report back to
 * this (ring, slot) pair; see generate_cookie()/parse_cookie(). */
755 bcm43xx_generate_txhdr(ring->bcm,
756 (struct bcm43xx_txhdr *)skb->data,
757 skb->data + sizeof(struct bcm43xx_txhdr),
758 skb->len - sizeof(struct bcm43xx_txhdr),
760 generate_cookie(ring, slot));
763 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
764 if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
765 return_slot(ring, slot);
766 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
767 "(0x%08x, len: %u)\n",
768 meta->dmaaddr, skb->len);
772 desc_addr = (u32)(meta->dmaaddr + ring->memoffset);
773 desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND;
/* Request a completion interrupt so the xmitstatus handler runs. */
774 desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
775 desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
776 (u32)(meta->skb->len - ring->frameoffset));
777 if (slot == ring->nr_slots - 1)
778 desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
780 set_desc_ctl(desc, desc_ctl);
781 set_desc_addr(desc, desc_addr);
782 /* Now transfer the whole frame. */
783 dmacontroller_poke_tx(ring, slot);
/* Transmit entry point: map every fragment of a txb onto tx_ring1 and
 * start the DMA. Called from atomic context. Fails early if the ring
 * does not have enough free slots for all fragments (which indicates
 * the suspend_mark watermark is set too low). */
788 int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
789 struct ieee80211_txb *txb)
791 /* We just received a packet from the kernel network subsystem.
792 * Add headers and DMA map the memory. Poke
793 * the device to send the stuff.
794 * Note that this is called from atomic context.
796 struct bcm43xx_dmaring *ring = bcm->current_core->dma->tx_ring1;
801 if (unlikely(free_slots(ring) < txb->nr_frags)) {
802 /* The queue should be stopped,
803 * if we are low on free slots.
804 * If this ever triggers, we have to lower the suspend_mark.
806 dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
810 for (i = 0; i < txb->nr_frags; i++) {
811 skb = txb->fragments[i];
812 /* We do not free the skb, as it is freed as
813 * part of the txb freeing.
815 mark_skb_mustfree(skb, 0);
816 dma_tx_fragment(ring, skb, txb, i);
817 //TODO: handle failure of dma_tx_fragment
/* Handle a TX-complete (xmit status) report from the hardware.
 * The cookie identifies the first slot of the transmitted frame; walk
 * the consecutive slots of that frame, unmapping and freeing each
 * buffer and returning the slot, until the FRAMEEND descriptor is seen.
 * Also updates the last-TX timestamp used elsewhere for statistics. */
823 void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
824 struct bcm43xx_xmitstatus *status)
826 struct bcm43xx_dmaring *ring;
827 struct bcm43xx_dmadesc *desc;
828 struct bcm43xx_dmadesc_meta *meta;
829 int is_last_fragment;
832 ring = parse_cookie(bcm, status->cookie, &slot);
/* The cookie must point at the first descriptor of a frame. */
835 assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
837 assert(slot >= 0 && slot < ring->nr_slots);
838 desc = ring->vbase + slot;
839 meta = ring->meta + slot;
841 is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
842 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
843 free_descriptor_buffer(ring, desc, meta, 1);
844 /* Everything belonging to the slot is unmapped
845 * and freed, so we can return it.
847 return_slot(ring, slot);
849 if (is_last_fragment)
851 slot = next_slot(ring, slot);
853 bcm->stats.last_tx = jiffies;
/* Process one received descriptor at *slot.
 * On the DMA4 ring this is a hardware xmit-status report, which is
 * decoded and dispatched to bcm43xx_dma_handle_xmitstatus(); the buffer
 * is then recycled. Otherwise it is a real frame: poll the rx header's
 * frame_length (written asynchronously by the device, retried a few
 * times), drop zero-length and oversized frames (recycling buffers),
 * attach a fresh buffer to the slot, and hand the skb to bcm43xx_rx(). */
856 static void dma_rx(struct bcm43xx_dmaring *ring,
859 struct bcm43xx_dmadesc *desc;
860 struct bcm43xx_dmadesc_meta *meta;
861 struct bcm43xx_rxhdr *rxhdr;
867 desc = ring->vbase + *slot;
868 meta = ring->meta + *slot;
/* Take ownership of the buffer before the CPU reads it. */
870 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
873 if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
874 /* We received an xmit status. */
875 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
876 struct bcm43xx_xmitstatus stat;
/* Convert the little-endian hardware layout to host order. */
878 stat.cookie = le16_to_cpu(hw->cookie);
879 stat.flags = hw->flags;
880 stat.cnt1 = hw->cnt1;
881 stat.cnt2 = hw->cnt2;
882 stat.seq = le16_to_cpu(hw->seq);
883 stat.unknown = le16_to_cpu(hw->unknown);
885 bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
886 bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
887 /* recycle the descriptor buffer. */
888 sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);
892 rxhdr = (struct bcm43xx_rxhdr *)skb->data;
893 len = le16_to_cpu(rxhdr->frame_length);
/* The device may not have written frame_length yet; retry briefly. */
900 len = le16_to_cpu(rxhdr->frame_length);
901 } while (len == 0 && i++ < 5);
902 if (unlikely(len == 0)) {
903 /* recycle the descriptor buffer. */
904 sync_descbuffer_for_device(ring, meta->dmaaddr,
905 ring->rx_buffersize);
909 if (unlikely(len > ring->rx_buffersize)) {
910 /* The data did not fit into one descriptor buffer
911 * and is split over multiple buffers.
912 * This should never happen, as we try to allocate buffers
913 * big enough. So simply ignore this packet.
 * Recycle and skip every descriptor the oversized frame occupied. */
919 desc = ring->vbase + *slot;
920 meta = ring->meta + *slot;
921 /* recycle the descriptor buffer. */
922 sync_descbuffer_for_device(ring, meta->dmaaddr,
923 ring->rx_buffersize);
924 *slot = next_slot(ring, *slot);
926 tmp -= ring->rx_buffersize;
930 printkl(KERN_ERR PFX "DMA RX buffer too small "
931 "(len: %u, buffer: %u, nr-dropped: %d)\n",
932 len, ring->rx_buffersize, cnt);
/* The hardware length includes the FCS; strip it. */
935 len -= IEEE80211_FCS_LEN;
937 dmaaddr = meta->dmaaddr;
/* Replace the buffer before passing the old one up the stack. */
938 err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
940 dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
941 sync_descbuffer_for_device(ring, dmaaddr,
942 ring->rx_buffersize);
946 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
/* Trim the skb to the frame data: grow to len+frameoffset, then drop
 * the device header at the front. */
947 skb_put(skb, len + ring->frameoffset);
948 skb_pull(skb, ring->frameoffset);
950 err = bcm43xx_rx(ring->bcm, skb, rxhdr);
952 dev_kfree_skb_irq(skb);
/* RX interrupt service for one ring: read the hardware descriptor
 * pointer, process every slot between our software position and the
 * hardware position via dma_rx(), then write the new RX descriptor
 * index back (as a byte offset) and remember it in current_slot. */
960 void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
964 int slot, current_slot;
965 #ifdef CONFIG_BCM43XX_DEBUG
970 status = bcm43xx_read32(ring->bcm, ring->mmio_base + BCM43xx_DMA_RX_STATUS);
/* Convert the hardware byte offset into a slot index. */
971 descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
972 current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
973 assert(current_slot >= 0 && current_slot < ring->nr_slots);
975 slot = ring->current_slot;
976 for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
/* Debug-only high-watermark of slots handled per invocation. */
978 #ifdef CONFIG_BCM43XX_DEBUG
979 if (++used_slots > ring->max_used_slots)
980 ring->max_used_slots = used_slots;
/* Tell the hardware how far we have consumed (byte offset again). */
983 bcm43xx_write32(ring->bcm,
984 ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
985 (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
986 ring->current_slot = slot;
989 /* vim: set ts=8 sw=8 sts=8: */