struct queue_entry *entry)
{
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
u32 word;
rt2x00_desc_read(entry_priv->desc, 2, &word);
- rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH,
- entry->queue->data_size);
+ rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->skb->len);
rt2x00_desc_write(entry_priv->desc, 2, word);
rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
+ rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
rt2x00_desc_read(entry_priv->desc, 0, &word);
* Start writing the descriptor words.
*/
rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
+ rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2400pci_probe_hw_mode(rt2x00dev);
/*
- * This device requires the atim queue
+ * This device requires the atim queue and DMA-mapped skbs.
*/
__set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
/*
* Set the rssi offset.
* Write entire beacon with descriptor to register,
* and kick the beacon generator.
*/
- memcpy(entry_priv->data, skb->data, skb->len);
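+ /*
+ * Map the beacon skb for DMA; the resulting bus address is written
+ * into the TX descriptor below.
+ */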
+ rt2x00queue_map_txskb(rt2x00dev, intf->beacon->skb);
rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
struct queue_entry *entry)
{
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
u32 word;
rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
+ rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
rt2x00_desc_read(entry_priv->desc, 0, &word);
* Start writing the descriptor words.
*/
rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
+ rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2500pci_probe_hw_mode(rt2x00dev);
/*
- * This device requires the atim queue
+ * This device requires the atim queue and DMA-mapped skbs.
*/
__set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
/*
* Set the rssi offset.
* Write entire beacon with descriptor to register,
* and kick the beacon generator.
*/
- memcpy(entry_priv->data, skb->data, skb->len);
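+ /*
+ * Map the beacon skb for DMA; the resulting bus address is written
+ * into the TX descriptor below.
+ */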
+ rt2x00queue_map_txskb(rt2x00dev, intf->beacon->skb);
rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
DRIVER_REQUIRE_BEACON_GUARD,
DRIVER_REQUIRE_ATIM_QUEUE,
DRIVER_REQUIRE_SCHEDULED,
+ DRIVER_REQUIRE_DMA,
/*
* Driver configuration
}
/**
- * rt2x00queue_alloc_skb - allocate a skb.
+ * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
- * @queue: The queue for which the skb will be applicable.
+ * @entry: The entry for which the skb will be allocated.
*/
-struct sk_buff *rt2x00queue_alloc_skb(struct data_queue *queue);
+struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
+ struct queue_entry *entry);
+
+/**
+ * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @skb: The skb to map.
+ */
+void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
+
+/**
+ * rt2x00queue_unmap_skb - Unmap a skb from DMA.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @skb: The skb to unmap.
+ */
+void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
/**
* rt2x00queue_free_skb - free a skb
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
* @skb: The skb to free.
*/
-void rt2x00queue_free_skb(struct sk_buff *skb);
+void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
/**
* rt2x00queue_create_tx_descriptor - Create TX descriptor from mac80211 input
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
void rt2x00lib_txdone(struct queue_entry *entry,
struct txdone_entry_desc *txdesc);
-void rt2x00lib_rxdone(struct queue_entry *entry,
- struct rxdone_entry_desc *rxdesc);
+void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
+ struct queue_entry *entry);
/*
* mac80211 handlers.
static void rt2x00lib_beacondone_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct rt2x00_dev *rt2x00dev = data;
struct rt2x00_intf *intf = vif_to_intf(vif);
if (vif->type != IEEE80211_IF_TYPE_AP &&
/*
* Clean up the beacon skb.
*/
- dev_kfree_skb_irq(intf->beacon->skb);
+ rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
intf->beacon->skb = NULL;
spin_lock(&intf->lock);
}
EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
-void rt2x00lib_rxdone(struct queue_entry *entry,
- struct rxdone_entry_desc *rxdesc)
+void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
+ struct queue_entry *entry)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct rxdone_entry_desc rxdesc;
+ struct sk_buff *skb;
struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status;
- unsigned int header_size = ieee80211_get_hdrlen_from_skb(entry->skb);
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
const struct rt2x00_rate *rate;
+ unsigned int header_size;
unsigned int align;
unsigned int i;
int idx = -1;
+ /*
+ * Allocate a new sk_buffer. If no new buffer available, drop the
+ * received frame and reuse the existing buffer.
+ */
+ skb = rt2x00queue_alloc_rxskb(rt2x00dev, entry);
+ if (!skb)
+ return;
+
+ /*
+ * Unmap the skb so the CPU can safely access the frame data
+ * that the device DMA'ed into it.
+ */
+ rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
+
+ /*
+ * Extract the RXD details.
+ */
+ memset(&rxdesc, 0, sizeof(rxdesc));
+ rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
+
/*
* The data behind the ieee80211 header must be
* aligned on a 4 byte boundary.
*/
+ header_size = ieee80211_get_hdrlen_from_skb(entry->skb);
align = ((unsigned long)(entry->skb->data + header_size)) & 3;
if (align) {
skb_push(entry->skb, align);
/* Move entire frame in 1 command */
memmove(entry->skb->data, entry->skb->data + align,
- rxdesc->size);
+ rxdesc.size);
}
/* Update data pointers, trim buffer to correct size */
- skb_trim(entry->skb, rxdesc->size);
+ skb_trim(entry->skb, rxdesc.size);
/*
* Update RX statistics.
for (i = 0; i < sband->n_bitrates; i++) {
rate = rt2x00_get_rate(sband->bitrates[i].hw_value);
- if (((rxdesc->dev_flags & RXDONE_SIGNAL_PLCP) &&
- (rate->plcp == rxdesc->signal)) ||
- (!(rxdesc->dev_flags & RXDONE_SIGNAL_PLCP) &&
- (rate->bitrate == rxdesc->signal))) {
+ if (((rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) &&
+ (rate->plcp == rxdesc.signal)) ||
+ (!(rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) &&
+ (rate->bitrate == rxdesc.signal))) {
idx = i;
break;
}
if (idx < 0) {
WARNING(rt2x00dev, "Frame received with unrecognized signal,"
- "signal=0x%.2x, plcp=%d.\n", rxdesc->signal,
- !!(rxdesc->dev_flags & RXDONE_SIGNAL_PLCP));
+ "signal=0x%.2x, plcp=%d.\n", rxdesc.signal,
+ !!(rxdesc.dev_flags & RXDONE_SIGNAL_PLCP));
idx = 0;
}
*/
hdr = (struct ieee80211_hdr *)entry->skb->data;
if (ieee80211_is_beacon(hdr->frame_control) &&
- (rxdesc->dev_flags & RXDONE_MY_BSS))
- rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc->rssi);
+ (rxdesc.dev_flags & RXDONE_MY_BSS))
+ rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc.rssi);
rt2x00dev->link.qual.rx_success++;
rx_status->rate_idx = idx;
rx_status->qual =
- rt2x00lib_calculate_link_signal(rt2x00dev, rxdesc->rssi);
- rx_status->signal = rxdesc->rssi;
- rx_status->flag = rxdesc->flags;
+ rt2x00lib_calculate_link_signal(rt2x00dev, rxdesc.rssi);
+ rx_status->signal = rxdesc.rssi;
+ rx_status->flag = rxdesc.flags;
rx_status->antenna = rt2x00dev->link.ant.active.rx;
/*
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb, rx_status);
- entry->skb = NULL;
+
+ /*
+ * Replace the skb with the freshly allocated one.
+ */
+ entry->skb = skb;
}
EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
skbdesc->desc_len = entry->queue->desc_size;
skbdesc->entry = entry;
- memcpy(entry_priv->data, entry->skb->data, entry->skb->len);
+ rt2x00queue_map_txskb(entry->queue->rt2x00dev, entry->skb);
return 0;
}
/*
* TX/RX data handlers.
*/
-static void rt2x00pci_rxdone_entry(struct rt2x00_dev *rt2x00dev,
- struct queue_entry *entry)
-{
- struct sk_buff *skb;
- struct skb_frame_desc *skbdesc;
- struct rxdone_entry_desc rxdesc;
- struct queue_entry_priv_pci *entry_priv = entry->priv_data;
-
- /*
- * Allocate a new sk_buffer. If no new buffer available, drop the
- * received frame and reuse the existing buffer.
- */
- skb = rt2x00queue_alloc_skb(entry->queue);
- if (!skb)
- return;
-
- /*
- * Extract the RXD details.
- */
- memset(&rxdesc, 0, sizeof(rxdesc));
- rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
-
- /*
- * Copy the received data to the entries' skb.
- */
- memcpy(entry->skb->data, entry_priv->data, rxdesc.size);
- skb_trim(entry->skb, rxdesc.size);
-
- /*
- * Fill in skb descriptor
- */
- skbdesc = get_skb_frame_desc(entry->skb);
- memset(skbdesc, 0, sizeof(*skbdesc));
- skbdesc->desc = entry_priv->desc;
- skbdesc->desc_len = entry->queue->desc_size;
- skbdesc->entry = entry;
-
- /*
- * Send the frame to rt2x00lib for further processing.
- */
- rt2x00lib_rxdone(entry, &rxdesc);
-
- /*
- * Replace the entries' skb with the newly allocated one.
- */
- entry->skb = skb;
-}
-
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue = rt2x00dev->rx;
struct queue_entry *entry;
struct queue_entry_priv_pci *entry_priv;
+ struct skb_frame_desc *skbdesc;
u32 word;
while (1) {
if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
break;
- rt2x00pci_rxdone_entry(rt2x00dev, entry);
+ /*
+ * Fill in desc fields of the skb descriptor
+ */
+ skbdesc = get_skb_frame_desc(entry->skb);
+ skbdesc->desc = entry_priv->desc;
+ skbdesc->desc_len = entry->queue->desc_size;
+
+ /*
+ * Send the frame to rt2x00lib for further processing.
+ */
+ rt2x00lib_rxdone(rt2x00dev, entry);
- if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
- rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
- rt2x00_desc_write(entry_priv->desc, 0, word);
- }
+ /*
+ * Reset the RXD for this entry.
+ */
+ rt2x00dev->ops->lib->init_rxentry(rt2x00dev, entry);
rt2x00queue_index_inc(queue, Q_INDEX);
}
enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
u32 word;
+ /*
+ * Unmap the skb.
+ */
+ rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
+
rt2x00lib_txdone(entry, txdesc);
/*
/*
* Device initialization handlers.
*/
-#define desc_size(__queue) \
-({ \
- ((__queue)->limit * (__queue)->desc_size);\
-})
-
-#define data_size(__queue) \
-({ \
- ((__queue)->limit * (__queue)->data_size);\
-})
-
-#define dma_size(__queue) \
-({ \
- data_size(__queue) + desc_size(__queue);\
-})
-
-#define desc_offset(__queue, __base, __i) \
-({ \
- (__base) + data_size(__queue) + \
- ((__i) * (__queue)->desc_size); \
-})
-
-#define data_offset(__queue, __base, __i) \
-({ \
- (__base) + \
- ((__i) * (__queue)->data_size); \
-})
-
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue)
{
/*
* Allocate DMA memory for descriptor and buffer.
*/
- addr = dma_alloc_coherent(rt2x00dev->dev, dma_size(queue), &dma,
- GFP_KERNEL | GFP_DMA);
+ addr = dma_alloc_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size,
+ &dma, GFP_KERNEL | GFP_DMA);
if (!addr)
return -ENOMEM;
- memset(addr, 0, dma_size(queue));
+ memset(addr, 0, queue->limit * queue->desc_size);
/*
* Initialize all queue entries to contain valid addresses.
*/
for (i = 0; i < queue->limit; i++) {
entry_priv = queue->entries[i].priv_data;
- entry_priv->desc = desc_offset(queue, addr, i);
- entry_priv->desc_dma = desc_offset(queue, dma, i);
- entry_priv->data = data_offset(queue, addr, i);
- entry_priv->data_dma = data_offset(queue, dma, i);
+ entry_priv->desc = addr + i * queue->desc_size;
+ entry_priv->desc_dma = dma + i * queue->desc_size;
}
return 0;
struct queue_entry_priv_pci *entry_priv =
queue->entries[0].priv_data;
- if (entry_priv->data)
- dma_free_coherent(rt2x00dev->dev, dma_size(queue),
- entry_priv->data, entry_priv->data_dma);
- entry_priv->data = NULL;
+ if (entry_priv->desc)
+ dma_free_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size,
+ entry_priv->desc, entry_priv->desc_dma);
+ entry_priv->desc = NULL;
}
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
struct queue_entry_priv_pci {
__le32 *desc;
dma_addr_t desc_dma;
-
- void *data;
- dma_addr_t data_dma;
};
/**
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
#include "rt2x00.h"
#include "rt2x00lib.h"
-struct sk_buff *rt2x00queue_alloc_skb(struct data_queue *queue)
+struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
+ struct queue_entry *entry)
{
- struct sk_buff *skb;
unsigned int frame_size;
unsigned int reserved_size;
+ struct sk_buff *skb;
+ struct skb_frame_desc *skbdesc;
/*
* The frame size includes descriptor size, because the
* hardware directly receive the frame into the skbuffer.
*/
- frame_size = queue->data_size + queue->desc_size;
+ frame_size = entry->queue->data_size + entry->queue->desc_size;
/*
* Reserve a few bytes extra headroom to allow drivers some moving
skb_reserve(skb, reserved_size);
skb_put(skb, frame_size);
+ /*
+ * Populate skbdesc.
+ */
+ skbdesc = get_skb_frame_desc(skb);
+ memset(skbdesc, 0, sizeof(*skbdesc));
+ skbdesc->entry = entry;
+
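+ /*
+ * Drivers that require DMA mapped skbs (PCI) need the buffer
+ * mapped before the device can DMA the received frame into it.
+ */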
+ if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
+ skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
+ skb->data,
+ skb->len,
+ DMA_FROM_DEVICE);
+ skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
+ }
+
return skb;
}
-EXPORT_SYMBOL_GPL(rt2x00queue_alloc_skb);
+EXPORT_SYMBOL_GPL(rt2x00queue_alloc_rxskb);
-void rt2x00queue_free_skb(struct sk_buff *skb)
+void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+
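+ /*
+ * Map the frame for TX DMA and store the bus address in the skb
+ * descriptor so the driver can program it into the TX descriptor.
+ */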
+ skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
+
+void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
+{
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+
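+ /*
+ * Release the RX or TX mapping, depending on which direction the
+ * skb was mapped in, and clear the flag so it can be mapped again.
+ */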
+ if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
+ dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
+ DMA_FROM_DEVICE);
+ skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
+ }
+
+ if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
+ dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
+ DMA_TO_DEVICE);
+ skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
+
+void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
+{
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+
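+ /*
+ * If the skb is still mapped for DMA, unmap it before freeing.
+ */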
+ if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
+ dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
+ DMA_FROM_DEVICE);
+ }
+
+ if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
+ dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
+ DMA_TO_DEVICE);
+ }
+
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL_GPL(rt2x00queue_free_skb);
return 0;
}
-static void rt2x00queue_free_skbs(struct data_queue *queue)
+static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
{
unsigned int i;
for (i = 0; i < queue->limit; i++) {
if (queue->entries[i].skb)
- rt2x00queue_free_skb(queue->entries[i].skb);
+ rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
}
}
-static int rt2x00queue_alloc_skbs(struct data_queue *queue)
+static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
{
unsigned int i;
struct sk_buff *skb;
for (i = 0; i < queue->limit; i++) {
- skb = rt2x00queue_alloc_skb(queue);
+ skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
if (!skb)
goto exit;
-
queue->entries[i].skb = skb;
}
return 0;
exit:
- rt2x00queue_free_skbs(queue);
+ rt2x00queue_free_skbs(rt2x00dev, queue);
return -ENOMEM;
}
goto exit;
}
- status = rt2x00queue_alloc_skbs(rt2x00dev->rx);
+ status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
if (status)
goto exit;
{
struct data_queue *queue;
- rt2x00queue_free_skbs(rt2x00dev->rx);
+ rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);
queue_for_each(rt2x00dev, queue) {
kfree(queue->entries);
* enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
*
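+ * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX purposes.
+ * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX purposes.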
*/
-//enum skb_frame_desc_flags {
-// TEMPORARILY EMPTY
-//};
+enum skb_frame_desc_flags {
+ SKBDESC_DMA_MAPPED_RX = (1 << 0),
+ SKBDESC_DMA_MAPPED_TX = (1 << 1),
+};
/**
* struct skb_frame_desc: Descriptor information for the skb buffer
* this structure should not exceed the size of that array (40 bytes).
*
* @flags: Frame flags, see &enum skb_frame_desc_flags.
- * @data: Pointer to data part of frame (Start of ieee80211 header).
+ * @desc_len: Length of the frame descriptor.
* @desc: Pointer to descriptor part of the frame.
* Note that this pointer could point to something outside
* of the scope of the skb->data pointer.
- * @data_len: Length of the frame data.
- * @desc_len: Length of the frame descriptor.
+ * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
* @entry: The entry to which this sk buffer belongs.
*/
struct skb_frame_desc {
unsigned int flags;
- void *desc;
unsigned int desc_len;
+ void *desc;
+
+ dma_addr_t skb_dma;
struct queue_entry *entry;
};
{
struct queue_entry *entry = (struct queue_entry *)urb->context;
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct sk_buff *skb;
- struct skb_frame_desc *skbdesc;
- struct rxdone_entry_desc rxdesc;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
u8 rxd[32];
if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
goto skip_entry;
/*
- * Fill in skb descriptor
+ * Fill in desc fields of the skb descriptor
*/
- skbdesc = get_skb_frame_desc(entry->skb);
- memset(skbdesc, 0, sizeof(*skbdesc));
- skbdesc->entry = entry;
skbdesc->desc = rxd;
skbdesc->desc_len = entry->queue->desc_size;
- memset(&rxdesc, 0, sizeof(rxdesc));
- rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
-
- /*
- * Allocate a new sk buffer to replace the current one.
- * If allocation fails, we should drop the current frame
- * so we can recycle the existing sk buffer for the new frame.
- */
- skb = rt2x00queue_alloc_skb(entry->queue);
- if (!skb)
- goto skip_entry;
-
/*
* Send the frame to rt2x00lib for further processing.
*/
- rt2x00lib_rxdone(entry, &rxdesc);
+ rt2x00lib_rxdone(rt2x00dev, entry);
/*
- * Replace current entry's skb with the newly allocated one,
- * and reinitialize the urb.
+ * Reinitialize the urb with the skb now attached to this entry
+ * (rt2x00lib_rxdone may have replaced it with a new one).
*/
- entry->skb = skb;
urb->transfer_buffer = entry->skb->data;
urb->transfer_buffer_length = entry->skb->len;
struct queue_entry *entry)
{
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
u32 word;
rt2x00_desc_read(entry_priv->desc, 5, &word);
rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS,
- entry_priv->data_dma);
+ skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 5, word);
rt2x00_desc_read(entry_priv->desc, 0, &word);
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
__le32 *txd = skbdesc->desc;
u32 word;
rt2x00_desc_read(txd, 6, &word);
rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
- entry_priv->data_dma);
+ skbdesc->skb_dma);
rt2x00_desc_write(txd, 6, word);
if (skbdesc->desc_len > TXINFO_SIZE) {
rt61pci_probe_hw_mode(rt2x00dev);
/*
- * This device requires firmware.
+ * This device requires firmware and DMA-mapped skbs.
*/
__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
/*
* Set the rssi offset.