-rt2x00lib-objs := rt2x00dev.o rt2x00mac.o rt2x00config.o
+rt2x00lib-objs := rt2x00dev.o rt2x00mac.o rt2x00config.o rt2x00queue.o
ifeq ($(CONFIG_RT2X00_LIB_DEBUGFS),y)
rt2x00lib-objs += rt2x00debug.o
}
static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_tx_queue_params *params)
+ const int cw_min, const int cw_max)
{
u32 reg;
rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
- rt2x00_set_field32(&reg, CSR11_CWMIN, params->cw_min);
- rt2x00_set_field32(&reg, CSR11_CWMAX, params->cw_max);
+ rt2x00_set_field32(&reg, CSR11_CWMIN, cw_min);
+ rt2x00_set_field32(&reg, CSR11_CWMAX, cw_max);
rt2x00pci_register_write(rt2x00dev, CSR11, reg);
}
* Initialization functions.
*/
static void rt2400pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry)
+ struct queue_entry *entry)
{
- __le32 *rxd = entry->priv;
+ struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
u32 word;
- rt2x00_desc_read(rxd, 2, &word);
- rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->ring->data_size);
- rt2x00_desc_write(rxd, 2, word);
+ rt2x00_desc_read(priv_rx->desc, 2, &word);
+ rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->queue->data_size);
+ rt2x00_desc_write(priv_rx->desc, 2, word);
- rt2x00_desc_read(rxd, 1, &word);
- rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry->data_dma);
- rt2x00_desc_write(rxd, 1, word);
+ rt2x00_desc_read(priv_rx->desc, 1, &word);
+ rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, priv_rx->dma);
+ rt2x00_desc_write(priv_rx->desc, 1, word);
- rt2x00_desc_read(rxd, 0, &word);
+ rt2x00_desc_read(priv_rx->desc, 0, &word);
rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
- rt2x00_desc_write(rxd, 0, word);
+ rt2x00_desc_write(priv_rx->desc, 0, word);
}
static void rt2400pci_init_txentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry)
+ struct queue_entry *entry)
{
- __le32 *txd = entry->priv;
+ struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
u32 word;
- rt2x00_desc_read(txd, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry->data_dma);
- rt2x00_desc_write(txd, 1, word);
+ rt2x00_desc_read(priv_tx->desc, 1, &word);
+ rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, priv_tx->dma);
+ rt2x00_desc_write(priv_tx->desc, 1, word);
- rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, entry->ring->data_size);
- rt2x00_desc_write(txd, 2, word);
+ rt2x00_desc_read(priv_tx->desc, 2, &word);
+ rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH,
+ entry->queue->data_size);
+ rt2x00_desc_write(priv_tx->desc, 2, word);
- rt2x00_desc_read(txd, 0, &word);
+ rt2x00_desc_read(priv_tx->desc, 0, &word);
rt2x00_set_field32(&word, TXD_W0_VALID, 0);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
- rt2x00_desc_write(txd, 0, word);
+ rt2x00_desc_write(priv_tx->desc, 0, word);
}
-static int rt2400pci_init_rings(struct rt2x00_dev *rt2x00dev)
+static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
{
+ struct queue_entry_priv_pci_rx *priv_rx;
+ struct queue_entry_priv_pci_tx *priv_tx;
u32 reg;
/*
* Initialize registers.
*/
rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
- rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size);
- rt2x00_set_field32(&reg, TXCSR2_NUM_TXD,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit);
- rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM,
- rt2x00dev->bcn[1].stats.limit);
- rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit);
+ rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
+ rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
+ rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
+ rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
+ priv_tx = rt2x00dev->tx[1].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg);
- rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma);
+ rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, TXCSR3, reg);
+ priv_tx = rt2x00dev->tx[0].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg);
- rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma);
+ rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
+ priv_tx = rt2x00dev->bcn[1].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
- rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
- rt2x00dev->bcn[1].data_dma);
+ rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
+ priv_tx = rt2x00dev->bcn[0].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
- rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
- rt2x00dev->bcn[0].data_dma);
+ rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, TXCSR6, reg);
rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg);
rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size);
- rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->stats.limit);
+ rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
rt2x00pci_register_write(rt2x00dev, RXCSR1, reg);
+ priv_rx = rt2x00dev->rx->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg);
- rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
- rt2x00dev->rx->data_dma);
+ rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, priv_rx->dma);
rt2x00pci_register_write(rt2x00dev, RXCSR2, reg);
return 0;
/*
* Initialize all registers.
*/
- if (rt2400pci_init_rings(rt2x00dev) ||
+ if (rt2400pci_init_queues(rt2x00dev) ||
rt2400pci_init_registers(rt2x00dev) ||
rt2400pci_init_bbp(rt2x00dev)) {
ERROR(rt2x00dev, "Register initialization failed.\n");
*/
static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
- struct txdata_entry_desc *desc,
+ struct txentry_desc *txdesc,
struct ieee80211_tx_control *control)
{
- struct skb_desc *skbdesc = get_skb_desc(skb);
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txd = skbdesc->desc;
u32 word;
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
- rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, desc->signal);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1);
- rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, desc->service);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1);
rt2x00_desc_write(txd, 3, word);
rt2x00_desc_read(txd, 4, &word);
- rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, desc->length_low);
+ rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1);
- rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, desc->length_high);
+ rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
rt2x00_desc_write(txd, 4, word);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags));
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_ACK,
- test_bit(ENTRY_TXD_ACK, &desc->flags));
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags));
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_RTS,
- test_bit(ENTRY_TXD_RTS_FRAME, &desc->flags));
- rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs);
+ test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
!!(control->flags &
IEEE80211_TXCTL_LONG_RETRY_LIMIT));
/*
* RX control handlers
*/
-static void rt2400pci_fill_rxdone(struct data_entry *entry,
- struct rxdata_entry_desc *desc)
+static void rt2400pci_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
{
- __le32 *rxd = entry->priv;
+ struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
u32 word0;
u32 word2;
- rt2x00_desc_read(rxd, 0, &word0);
- rt2x00_desc_read(rxd, 2, &word2);
+ rt2x00_desc_read(priv_rx->desc, 0, &word0);
+ rt2x00_desc_read(priv_rx->desc, 2, &word2);
- desc->flags = 0;
+ rxdesc->flags = 0;
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
- desc->flags |= RX_FLAG_FAILED_FCS_CRC;
+ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
- desc->flags |= RX_FLAG_FAILED_PLCP_CRC;
+ rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
/*
* Obtain the status about this packet.
*/
- desc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL);
- desc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) -
- entry->ring->rt2x00dev->rssi_offset;
- desc->ofdm = 0;
- desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
- desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
+ rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL);
+ rxdesc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) -
+ entry->queue->rt2x00dev->rssi_offset;
+ rxdesc->ofdm = 0;
+ rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
+ rxdesc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
}
/*
* Interrupt functions.
*/
-static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue)
+static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
+ const enum ieee80211_tx_queue queue_idx)
{
- struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue);
- struct data_entry *entry;
- __le32 *txd;
+ struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+ struct queue_entry_priv_pci_tx *priv_tx;
+ struct queue_entry *entry;
+ struct txdone_entry_desc txdesc;
u32 word;
- int tx_status;
- int retry;
- while (!rt2x00_ring_empty(ring)) {
- entry = rt2x00_get_data_entry_done(ring);
- txd = entry->priv;
- rt2x00_desc_read(txd, 0, &word);
+ while (!rt2x00queue_empty(queue)) {
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+ priv_tx = entry->priv_data;
+ rt2x00_desc_read(priv_tx->desc, 0, &word);
if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
!rt2x00_get_field32(word, TXD_W0_VALID))
/*
* Obtain the status about this packet.
*/
- tx_status = rt2x00_get_field32(word, TXD_W0_RESULT);
- retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
+ txdesc.status = rt2x00_get_field32(word, TXD_W0_RESULT);
+ txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
- rt2x00pci_txdone(rt2x00dev, entry, tx_status, retry);
+ rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
}
}
rt2400pci_probe_hw_mode(rt2x00dev);
/*
- * This device requires the beacon ring
+ * This device requires the atim queue
*/
- __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
/*
* Set the rssi offset.
/*
* Write configuration to register.
*/
- rt2400pci_config_cw(rt2x00dev, &rt2x00dev->tx->tx_params);
+ rt2400pci_config_cw(rt2x00dev,
+ rt2x00dev->tx->cw_min, rt2x00dev->tx->cw_max);
return 0;
}
.config = rt2400pci_config,
};
+static const struct data_queue_desc rt2400pci_queue_rx = {
+ .entry_num = RX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = RXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_rx),
+};
+
+static const struct data_queue_desc rt2400pci_queue_tx = {
+ .entry_num = TX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_tx),
+};
+
+static const struct data_queue_desc rt2400pci_queue_bcn = {
+ .entry_num = BEACON_ENTRIES,
+ .data_size = MGMT_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_tx),
+};
+
+static const struct data_queue_desc rt2400pci_queue_atim = {
+ .entry_num = ATIM_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_tx),
+};
+
static const struct rt2x00_ops rt2400pci_ops = {
.name = KBUILD_MODNAME,
- .rxd_size = RXD_DESC_SIZE,
- .txd_size = TXD_DESC_SIZE,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
+ .rx = &rt2400pci_queue_rx,
+ .tx = &rt2400pci_queue_tx,
+ .bcn = &rt2400pci_queue_bcn,
+ .atim = &rt2400pci_queue_atim,
.lib = &rt2400pci_rt2x00_ops,
.hw = &rt2400pci_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
static void rt2500pci_config_type(struct rt2x00_dev *rt2x00dev, const int type,
const int tsf_sync)
{
+ struct data_queue *queue =
+ rt2x00queue_get_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON);
u32 reg;
rt2x00pci_register_write(rt2x00dev, CSR14, 0);
rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg);
rt2x00_set_field32(&reg, BCNCSR1_PRELOAD,
PREAMBLE + get_duration(IEEE80211_HEADER, 20));
- rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN,
- rt2x00lib_get_ring(rt2x00dev,
- IEEE80211_TX_QUEUE_BEACON)
- ->tx_params.cw_min);
+ rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min);
rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg);
/*
* Initialization functions.
*/
static void rt2500pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry)
+ struct queue_entry *entry)
{
- __le32 *rxd = entry->priv;
+ struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
u32 word;
- rt2x00_desc_read(rxd, 1, &word);
- rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry->data_dma);
- rt2x00_desc_write(rxd, 1, word);
+ rt2x00_desc_read(priv_rx->desc, 1, &word);
+ rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, priv_rx->dma);
+ rt2x00_desc_write(priv_rx->desc, 1, word);
- rt2x00_desc_read(rxd, 0, &word);
+ rt2x00_desc_read(priv_rx->desc, 0, &word);
rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
- rt2x00_desc_write(rxd, 0, word);
+ rt2x00_desc_write(priv_rx->desc, 0, word);
}
static void rt2500pci_init_txentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry)
+ struct queue_entry *entry)
{
- __le32 *txd = entry->priv;
+ struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
u32 word;
- rt2x00_desc_read(txd, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry->data_dma);
- rt2x00_desc_write(txd, 1, word);
+ rt2x00_desc_read(priv_tx->desc, 1, &word);
+ rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, priv_tx->dma);
+ rt2x00_desc_write(priv_tx->desc, 1, word);
- rt2x00_desc_read(txd, 0, &word);
+ rt2x00_desc_read(priv_tx->desc, 0, &word);
rt2x00_set_field32(&word, TXD_W0_VALID, 0);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
- rt2x00_desc_write(txd, 0, word);
+ rt2x00_desc_write(priv_tx->desc, 0, word);
}
-static int rt2500pci_init_rings(struct rt2x00_dev *rt2x00dev)
+static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
{
+ struct queue_entry_priv_pci_rx *priv_rx;
+ struct queue_entry_priv_pci_tx *priv_tx;
u32 reg;
/*
* Initialize registers.
*/
rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
- rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size);
- rt2x00_set_field32(&reg, TXCSR2_NUM_TXD,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit);
- rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM,
- rt2x00dev->bcn[1].stats.limit);
- rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit);
+ rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
+ rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
+ rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
+ rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
+ priv_tx = rt2x00dev->tx[1].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg);
- rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma);
+ rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, TXCSR3, reg);
+ priv_tx = rt2x00dev->tx[0].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg);
- rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma);
+ rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
+ priv_tx = rt2x00dev->bcn[1].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
- rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
- rt2x00dev->bcn[1].data_dma);
+ rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
+ priv_tx = rt2x00dev->bcn[0].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
- rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
- rt2x00dev->bcn[0].data_dma);
+ rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, TXCSR6, reg);
rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg);
rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size);
- rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->stats.limit);
+ rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
rt2x00pci_register_write(rt2x00dev, RXCSR1, reg);
+ priv_rx = rt2x00dev->rx->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg);
- rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
- rt2x00dev->rx->data_dma);
+ rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, priv_rx->dma);
rt2x00pci_register_write(rt2x00dev, RXCSR2, reg);
return 0;
/*
* Initialize all registers.
*/
- if (rt2500pci_init_rings(rt2x00dev) ||
+ if (rt2500pci_init_queues(rt2x00dev) ||
rt2500pci_init_registers(rt2x00dev) ||
rt2500pci_init_bbp(rt2x00dev)) {
ERROR(rt2x00dev, "Register initialization failed.\n");
*/
static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
- struct txdata_entry_desc *desc,
+ struct txentry_desc *txdesc,
struct ieee80211_tx_control *control)
{
- struct skb_desc *skbdesc = get_skb_desc(skb);
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txd = skbdesc->desc;
u32 word;
*/
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
- rt2x00_set_field32(&word, TXD_W2_AIFS, desc->aifs);
- rt2x00_set_field32(&word, TXD_W2_CWMIN, desc->cw_min);
- rt2x00_set_field32(&word, TXD_W2_CWMAX, desc->cw_max);
+ rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs);
+ rt2x00_set_field32(&word, TXD_W2_CWMIN, txdesc->cw_min);
+ rt2x00_set_field32(&word, TXD_W2_CWMAX, txdesc->cw_max);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
- rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, desc->signal);
- rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, desc->service);
- rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, desc->length_low);
- rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, desc->length_high);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_desc_write(txd, 3, word);
rt2x00_desc_read(txd, 10, &word);
rt2x00_set_field32(&word, TXD_W10_RTS,
- test_bit(ENTRY_TXD_RTS_FRAME, &desc->flags));
+ test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
rt2x00_desc_write(txd, 10, word);
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags));
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_ACK,
- test_bit(ENTRY_TXD_ACK, &desc->flags));
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags));
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
- test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags));
+ test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
- rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
!!(control->flags &
IEEE80211_TXCTL_LONG_RETRY_LIMIT));
/*
* RX control handlers
*/
-static void rt2500pci_fill_rxdone(struct data_entry *entry,
- struct rxdata_entry_desc *desc)
+static void rt2500pci_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
{
- __le32 *rxd = entry->priv;
+ struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
u32 word0;
u32 word2;
- rt2x00_desc_read(rxd, 0, &word0);
- rt2x00_desc_read(rxd, 2, &word2);
+ rt2x00_desc_read(priv_rx->desc, 0, &word0);
+ rt2x00_desc_read(priv_rx->desc, 2, &word2);
- desc->flags = 0;
+ rxdesc->flags = 0;
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
- desc->flags |= RX_FLAG_FAILED_FCS_CRC;
+ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
- desc->flags |= RX_FLAG_FAILED_PLCP_CRC;
-
- desc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL);
- desc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) -
- entry->ring->rt2x00dev->rssi_offset;
- desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM);
- desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
- desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
+ rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
+
+ rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL);
+ rxdesc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) -
+ entry->queue->rt2x00dev->rssi_offset;
+ rxdesc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM);
+ rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
+ rxdesc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
}
/*
* Interrupt functions.
*/
-static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue)
+static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
+ const enum ieee80211_tx_queue queue_idx)
{
- struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue);
- struct data_entry *entry;
- __le32 *txd;
+ struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+ struct queue_entry_priv_pci_tx *priv_tx;
+ struct queue_entry *entry;
+ struct txdone_entry_desc txdesc;
u32 word;
- int tx_status;
- int retry;
- while (!rt2x00_ring_empty(ring)) {
- entry = rt2x00_get_data_entry_done(ring);
- txd = entry->priv;
- rt2x00_desc_read(txd, 0, &word);
+ while (!rt2x00queue_empty(queue)) {
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+ priv_tx = entry->priv_data;
+ rt2x00_desc_read(priv_tx->desc, 0, &word);
if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
!rt2x00_get_field32(word, TXD_W0_VALID))
/*
* Obtain the status about this packet.
*/
- tx_status = rt2x00_get_field32(word, TXD_W0_RESULT);
- retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
+ txdesc.status = rt2x00_get_field32(word, TXD_W0_RESULT);
+ txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
- rt2x00pci_txdone(rt2x00dev, entry, tx_status, retry);
+ rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
}
}
rt2500pci_probe_hw_mode(rt2x00dev);
/*
- * This device requires the beacon ring
+ * This device requires the atim queue
*/
- __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
/*
* Set the rssi offset.
.config = rt2500pci_config,
};
+static const struct data_queue_desc rt2500pci_queue_rx = {
+ .entry_num = RX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = RXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_rx),
+};
+
+static const struct data_queue_desc rt2500pci_queue_tx = {
+ .entry_num = TX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_tx),
+};
+
+static const struct data_queue_desc rt2500pci_queue_bcn = {
+ .entry_num = BEACON_ENTRIES,
+ .data_size = MGMT_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_tx),
+};
+
+static const struct data_queue_desc rt2500pci_queue_atim = {
+ .entry_num = ATIM_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_tx),
+};
+
static const struct rt2x00_ops rt2500pci_ops = {
.name = KBUILD_MODNAME,
- .rxd_size = RXD_DESC_SIZE,
- .txd_size = TXD_DESC_SIZE,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
+ .rx = &rt2500pci_queue_rx,
+ .tx = &rt2500pci_queue_tx,
+ .bcn = &rt2500pci_queue_bcn,
+ .atim = &rt2500pci_queue_atim,
.lib = &rt2500pci_rt2x00_ops,
.hw = &rt2500pci_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
*/
static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
- struct txdata_entry_desc *desc,
+ struct txentry_desc *txdesc,
struct ieee80211_tx_control *control)
{
- struct skb_desc *skbdesc = get_skb_desc(skb);
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txd = skbdesc->desc;
u32 word;
*/
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
- rt2x00_set_field32(&word, TXD_W1_AIFS, desc->aifs);
- rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min);
- rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max);
+ rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs);
+ rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
+ rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, control->retry_limit);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags));
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_ACK,
- test_bit(ENTRY_TXD_ACK, &desc->flags));
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags));
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
- test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags));
+ test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
!!(control->flags & IEEE80211_TXCTL_FIRST_FRAGMENT));
- rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE);
rt2x00_desc_write(txd, 0, word);
/*
* RX control handlers
*/
-static void rt2500usb_fill_rxdone(struct data_entry *entry,
- struct rxdata_entry_desc *desc)
+static void rt2500usb_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
{
- struct skb_desc *skbdesc = get_skb_desc(entry->skb);
- struct urb *urb = entry->priv;
- __le32 *rxd = (__le32 *)(entry->skb->data +
- (urb->actual_length - entry->ring->desc_size));
+ struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ __le32 *rxd =
+ (__le32 *)(entry->skb->data +
+ (priv_rx->urb->actual_length - entry->queue->desc_size));
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+ int header_size = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
u32 word0;
u32 word1;
rt2x00_desc_read(rxd, 0, &word0);
rt2x00_desc_read(rxd, 1, &word1);
- desc->flags = 0;
+ rxdesc->flags = 0;
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
- desc->flags |= RX_FLAG_FAILED_FCS_CRC;
+ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
- desc->flags |= RX_FLAG_FAILED_PLCP_CRC;
+ rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
/*
* Obtain the status about this packet.
*/
- desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
- desc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) -
- entry->ring->rt2x00dev->rssi_offset;
- desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM);
- desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
- desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
+ rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
+ rxdesc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) -
+ entry->queue->rt2x00dev->rssi_offset;
+ rxdesc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM);
+ rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
+ rxdesc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
/*
- * Set descriptor and data pointer.
+ * The data behind the ieee80211 header must be
+ * aligned on a 4 byte boundary.
+ */
+ if (header_size % 4 == 0) {
+ skb_push(entry->skb, 2);
+ memmove(entry->skb->data, entry->skb->data + 2,
+ entry->skb->len - 2);
+ }
+
+ /*
+ * Set descriptor pointer.
*/
- skbdesc->desc = entry->skb->data + desc->size;
- skbdesc->desc_len = entry->ring->desc_size;
skbdesc->data = entry->skb->data;
- skbdesc->data_len = desc->size;
+ skbdesc->data_len = entry->queue->data_size;
+ skbdesc->desc = entry->skb->data + rxdesc->size;
+ skbdesc->desc_len = entry->queue->desc_size;
+
+ /*
+ * Remove descriptor from skb buffer and trim the whole thing
+ * down to only contain data.
+ */
+ skb_trim(entry->skb, rxdesc->size);
}
/*
*/
static void rt2500usb_beacondone(struct urb *urb)
{
- struct data_entry *entry = (struct data_entry *)urb->context;
- struct data_ring *ring = entry->ring;
+ struct queue_entry *entry = (struct queue_entry *)urb->context;
+ struct queue_entry_priv_usb_bcn *priv_bcn = entry->priv_data;
- if (!test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags))
+ if (!test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
return;
/*
* Otherwise we should free the sk_buffer, the device
* should be doing the rest of the work now.
*/
- if (ring->index == 1) {
- rt2x00_ring_index_done_inc(ring);
- entry = rt2x00_get_data_entry(ring);
- usb_submit_urb(entry->priv, GFP_ATOMIC);
- rt2x00_ring_index_inc(ring);
- } else if (ring->index_done == 1) {
- entry = rt2x00_get_data_entry_done(ring);
- if (entry->skb) {
- dev_kfree_skb(entry->skb);
- entry->skb = NULL;
- }
- rt2x00_ring_index_done_inc(ring);
+ if (priv_bcn->guardian_urb == urb) {
+ usb_submit_urb(priv_bcn->urb, GFP_ATOMIC);
+ } else if (priv_bcn->urb == urb) {
+ dev_kfree_skb(entry->skb);
+ entry->skb = NULL;
}
}
rt2500usb_probe_hw_mode(rt2x00dev);
/*
- * This device requires the beacon ring
+ * This device requires the atim queue
*/
- __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
/*
* Set the rssi offset.
struct ieee80211_tx_control *control)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct usb_device *usb_dev =
- interface_to_usbdev(rt2x00dev_usb(rt2x00dev));
- struct skb_desc *desc;
- struct data_ring *ring;
- struct data_entry *beacon;
- struct data_entry *guardian;
+ struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
+ struct queue_entry_priv_usb_bcn *priv_bcn;
+ struct skb_frame_desc *skbdesc;
+ struct data_queue *queue;
+ struct queue_entry *entry;
int pipe = usb_sndbulkpipe(usb_dev, 1);
int length;
* initialization.
*/
control->queue = IEEE80211_TX_QUEUE_BEACON;
- ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
-
- /*
- * Obtain 2 entries, one for the guardian byte,
- * the second for the actual beacon.
- */
- guardian = rt2x00_get_data_entry(ring);
- rt2x00_ring_index_inc(ring);
- beacon = rt2x00_get_data_entry(ring);
+ queue = rt2x00queue_get_queue(rt2x00dev, control->queue);
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ priv_bcn = entry->priv_data;
/*
* Add the descriptor in front of the skb.
*/
- skb_push(skb, ring->desc_size);
- memset(skb->data, 0, ring->desc_size);
+ skb_push(skb, queue->desc_size);
+ memset(skb->data, 0, queue->desc_size);
/*
* Fill in skb descriptor
*/
- desc = get_skb_desc(skb);
- desc->desc_len = ring->desc_size;
- desc->data_len = skb->len - ring->desc_size;
- desc->desc = skb->data;
- desc->data = skb->data + ring->desc_size;
- desc->ring = ring;
- desc->entry = beacon;
+ skbdesc = get_skb_frame_desc(skb);
+ memset(skbdesc, 0, sizeof(*skbdesc));
+ skbdesc->data = skb->data + queue->desc_size;
+ skbdesc->data_len = queue->data_size;
+ skbdesc->desc = skb->data;
+ skbdesc->desc_len = queue->desc_size;
+ skbdesc->entry = entry;
rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
*/
length = rt2500usb_get_tx_data_len(rt2x00dev, skb);
- usb_fill_bulk_urb(beacon->priv, usb_dev, pipe,
- skb->data, length, rt2500usb_beacondone, beacon);
+ usb_fill_bulk_urb(priv_bcn->urb, usb_dev, pipe,
+ skb->data, length, rt2500usb_beacondone, entry);
/*
* Second we need to create the guardian byte.
* We only need a single byte, so lets recycle
* the 'flags' field we are not using for beacons.
*/
- guardian->flags = 0;
- usb_fill_bulk_urb(guardian->priv, usb_dev, pipe,
- &guardian->flags, 1, rt2500usb_beacondone, guardian);
+ priv_bcn->guardian_data = 0;
+ usb_fill_bulk_urb(priv_bcn->guardian_urb, usb_dev, pipe,
+ &priv_bcn->guardian_data, 1, rt2500usb_beacondone,
+ entry);
/*
* Send out the guardian byte.
*/
- usb_submit_urb(guardian->priv, GFP_ATOMIC);
+ usb_submit_urb(priv_bcn->guardian_urb, GFP_ATOMIC);
/*
* Enable beacon generation.
.config = rt2500usb_config,
};
+static const struct data_queue_desc rt2500usb_queue_rx = {
+ .entry_num = RX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = RXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb_rx),
+};
+
+static const struct data_queue_desc rt2500usb_queue_tx = {
+ .entry_num = TX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb_tx),
+};
+
+static const struct data_queue_desc rt2500usb_queue_bcn = {
+ .entry_num = BEACON_ENTRIES,
+ .data_size = MGMT_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb_bcn),
+};
+
+static const struct data_queue_desc rt2500usb_queue_atim = {
+ .entry_num = ATIM_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb_tx),
+};
+
static const struct rt2x00_ops rt2500usb_ops = {
.name = KBUILD_MODNAME,
- .rxd_size = RXD_DESC_SIZE,
- .txd_size = TXD_DESC_SIZE,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
+ .rx = &rt2500usb_queue_rx,
+ .tx = &rt2500usb_queue_tx,
+ .bcn = &rt2500usb_queue_bcn,
+ .atim = &rt2500usb_queue_atim,
.lib = &rt2500usb_rt2x00_ops,
.hw = &rt2500usb_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
#define RT2X00_H
#include <linux/bitops.h>
-#include <linux/prefetch.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include "rt2x00debug.h"
#include "rt2x00reg.h"
-#include "rt2x00ring.h"
+#include "rt2x00queue.h"
/*
* Module information.
#define EEPROM(__dev, __msg, __args...) \
DEBUG_PRINTK(__dev, KERN_DEBUG, "EEPROM recovery", __msg, ##__args)
-/*
- * Ring sizes.
- * Ralink PCI devices demand the Frame size to be a multiple of 128 bytes.
- * DATA_FRAME_SIZE is used for TX, RX, ATIM and PRIO rings.
- * MGMT_FRAME_SIZE is used for the BEACON ring.
- */
-#define DATA_FRAME_SIZE 2432
-#define MGMT_FRAME_SIZE 256
-
-/*
- * Number of entries in a packet ring.
- * PCI devices only need 1 Beacon entry,
- * but USB devices require a second because they
- * have to send a Guardian byte first.
- */
-#define RX_ENTRIES 12
-#define TX_ENTRIES 12
-#define ATIM_ENTRIES 1
-#define BEACON_ENTRIES 2
-
/*
* Standard timing and size defines.
* These values should follow the ieee80211 specifications.
void (*uninitialize) (struct rt2x00_dev *rt2x00dev);
/*
- * Ring initialization handlers
+ * Queue initialization handlers
*/
void (*init_rxentry) (struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry);
+ struct queue_entry *entry);
void (*init_txentry) (struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry);
+ struct queue_entry *entry);
/*
* Radio control handlers.
*/
void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
- struct txdata_entry_desc *desc,
+ struct txentry_desc *txdesc,
struct ieee80211_tx_control *control);
int (*write_tx_data) (struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring, struct sk_buff *skb,
+ struct data_queue *queue, struct sk_buff *skb,
struct ieee80211_tx_control *control);
int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb);
/*
* RX control handlers
*/
- void (*fill_rxdone) (struct data_entry *entry,
- struct rxdata_entry_desc *desc);
+ void (*fill_rxdone) (struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc);
/*
* Configuration handlers.
*/
struct rt2x00_ops {
const char *name;
- const unsigned int rxd_size;
- const unsigned int txd_size;
const unsigned int eeprom_size;
const unsigned int rf_size;
+ const struct data_queue_desc *rx;
+ const struct data_queue_desc *tx;
+ const struct data_queue_desc *bcn;
+ const struct data_queue_desc *atim;
const struct rt2x00lib_ops *lib;
const struct ieee80211_ops *hw;
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
* Driver features
*/
DRIVER_REQUIRE_FIRMWARE,
- DRIVER_REQUIRE_BEACON_RING,
+ DRIVER_REQUIRE_BEACON_GUARD,
+ DRIVER_REQUIRE_ATIM_QUEUE,
/*
* Driver configuration
* macro's should be used for correct typecasting.
*/
void *dev;
-#define rt2x00dev_pci(__dev) ( (struct pci_dev*)(__dev)->dev )
-#define rt2x00dev_usb(__dev) ( (struct usb_interface*)(__dev)->dev )
+#define rt2x00dev_pci(__dev) ( (struct pci_dev *)(__dev)->dev )
+#define rt2x00dev_usb(__dev) ( (struct usb_interface *)(__dev)->dev )
+#define rt2x00dev_usb_dev(__dev)\
+ ( (struct usb_device *)interface_to_usbdev(rt2x00dev_usb(__dev)) )
/*
* Callback functions.
struct work_struct config_work;
/*
- * Data ring arrays for RX, TX and Beacon.
- * The Beacon array also contains the Atim ring
+ * Data queue arrays for RX, TX and Beacon.
+ * The Beacon array also contains the Atim queue
* if that is supported by the device.
*/
- int data_rings;
- struct data_ring *rx;
- struct data_ring *tx;
- struct data_ring *bcn;
+ int data_queues;
+ struct data_queue *rx;
+ struct data_queue *tx;
+ struct data_queue *bcn;
/*
* Firmware image.
const struct firmware *fw;
};
-/*
- * For-each loop for the ring array.
- * All rings have been allocated as a single array,
- * this means we can create a very simply loop macro
- * that is capable of looping through all rings.
- * ring_end(), txring_end() and ring_loop() are helper macro's which
- * should not be used directly. Instead the following should be used:
- * ring_for_each() - Loops through all rings (RX, TX, Beacon & Atim)
- * txring_for_each() - Loops through TX data rings (TX only)
- * txringall_for_each() - Loops through all TX rings (TX, Beacon & Atim)
- */
-#define ring_end(__dev) \
- &(__dev)->rx[(__dev)->data_rings]
-
-#define txring_end(__dev) \
- &(__dev)->tx[(__dev)->hw->queues]
-
-#define ring_loop(__entry, __start, __end) \
- for ((__entry) = (__start); \
- prefetch(&(__entry)[1]), (__entry) != (__end); \
- (__entry) = &(__entry)[1])
-
-#define ring_for_each(__dev, __entry) \
- ring_loop(__entry, (__dev)->rx, ring_end(__dev))
-
-#define txring_for_each(__dev, __entry) \
- ring_loop(__entry, (__dev)->tx, txring_end(__dev))
-
-#define txringall_for_each(__dev, __entry) \
- ring_loop(__entry, (__dev)->tx, ring_end(__dev))
-
/*
* Generic RF access.
* The RF is being accessed by word index.
return ((size * 8 * 10) % rate);
}
-/*
- * Library functions.
+/**
+ * rt2x00queue_get_queue - Convert mac80211 queue index to rt2x00 queue
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @queue: mac80211 queue index (see &enum ieee80211_tx_queue).
*/
-struct data_ring *rt2x00lib_get_ring(struct rt2x00_dev *rt2x00dev,
- const unsigned int queue);
+struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
+ const enum ieee80211_tx_queue queue);
+
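The body of rt2x00queue_get_queue() lives in the new rt2x00queue.c and is not
part of this diff. As an editorial sketch only (not patch content), the mapping
implied by the bcn[0]/bcn[1] usage in the init_queues handlers and by the
removed rt2x00lib_get_ring() further down could look roughly like this:

/*
 * Illustrative sketch, not patch content: map a mac80211 queue index
 * onto the rt2x00 data_queue arrays (tx[], bcn[0] = beacon,
 * bcn[1] = atim when the driver has set DRIVER_REQUIRE_ATIM_QUEUE).
 */
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum ieee80211_tx_queue queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/* Regular data queues are indexed directly. */
	if (queue < rt2x00dev->hw->queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	/* Beacon and (optional) atim queues share the bcn array. */
	if (queue == IEEE80211_TX_QUEUE_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}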
+/**
+ * rt2x00queue_get_entry - Get queue entry where the given index points to.
+ * @queue: Pointer to &struct data_queue from where the entry is obtained.
+ * @index: Index identifier for obtaining the correct index.
+ */
+struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
+ enum queue_index index);
+
+/**
+ * rt2x00queue_index_inc - Index incrementation function
+ * @queue: Queue (&struct data_queue) to perform the action on.
+ * @index: Index type (&enum queue_index) to perform the action on.
+ *
+ * This function will increase the requested index on the queue;
+ * it will grab the appropriate locks and handle queue overflow events by
+ * resetting the index to the start of the queue.
+ */
+void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index);
+
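rt2x00queue_index_inc() is likewise only declared here. A minimal editorial
sketch of the wrap-around behaviour described by the kerneldoc could look like
the following; the index[] array, the lock spinlock and the Q_INDEX_MAX
terminator are assumptions of this sketch, while limit and ERROR() are taken
from elsewhere in the patch:

/*
 * Illustrative sketch, not patch content: advance one of the queue
 * indexes under the queue lock and wrap it back to the start of the
 * queue on overflow.
 */
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d).\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	spin_unlock_irqrestore(&queue->lock, irqflags);
}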
/*
* Interrupt context handlers.
*/
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
-void rt2x00lib_txdone(struct data_entry *entry,
- const int status, const int retry);
-void rt2x00lib_rxdone(struct data_entry *entry, struct sk_buff *skb,
- struct rxdata_entry_desc *desc);
+void rt2x00lib_txdone(struct queue_entry *entry,
+ struct txdone_entry_desc *txdesc);
+void rt2x00lib_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc);
/*
* TX descriptor initializer
struct sk_buff *skb)
{
struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
- struct skb_desc *desc = get_skb_desc(skb);
+ struct skb_frame_desc *desc = get_skb_frame_desc(skb);
struct sk_buff *skbcopy;
struct rt2x00dump_hdr *dump_hdr;
struct timeval timestamp;
dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev);
dump_hdr->type = cpu_to_le16(desc->frame_type);
- dump_hdr->ring_index = desc->ring->queue_idx;
+ dump_hdr->queue_index = desc->entry->queue->qid;
dump_hdr->entry_index = desc->entry->entry_idx;
dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);
return 0;
}
-static int rt2x00debug_open_ring_dump(struct inode *inode, struct file *file)
+static int rt2x00debug_open_queue_dump(struct inode *inode, struct file *file)
{
struct rt2x00debug_intf *intf = inode->i_private;
int retval;
return 0;
}
-static int rt2x00debug_release_ring_dump(struct inode *inode, struct file *file)
+static int rt2x00debug_release_queue_dump(struct inode *inode, struct file *file)
{
struct rt2x00debug_intf *intf = inode->i_private;
return rt2x00debug_file_release(inode, file);
}
-static ssize_t rt2x00debug_read_ring_dump(struct file *file,
- char __user *buf,
- size_t length,
- loff_t *offset)
+static ssize_t rt2x00debug_read_queue_dump(struct file *file,
+ char __user *buf,
+ size_t length,
+ loff_t *offset)
{
struct rt2x00debug_intf *intf = file->private_data;
struct sk_buff *skb;
return status;
}
-static unsigned int rt2x00debug_poll_ring_dump(struct file *file,
- poll_table *wait)
+static unsigned int rt2x00debug_poll_queue_dump(struct file *file,
+ poll_table *wait)
{
struct rt2x00debug_intf *intf = file->private_data;
return 0;
}
-static const struct file_operations rt2x00debug_fop_ring_dump = {
+static const struct file_operations rt2x00debug_fop_queue_dump = {
.owner = THIS_MODULE,
- .read = rt2x00debug_read_ring_dump,
- .poll = rt2x00debug_poll_ring_dump,
- .open = rt2x00debug_open_ring_dump,
- .release = rt2x00debug_release_ring_dump,
+ .read = rt2x00debug_read_queue_dump,
+ .poll = rt2x00debug_poll_queue_dump,
+ .open = rt2x00debug_open_queue_dump,
+ .release = rt2x00debug_release_queue_dump,
};
#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \
intf->frame_dump_entry =
debugfs_create_file("dump", S_IRUGO, intf->frame_folder,
- intf, &rt2x00debug_fop_ring_dump);
+ intf, &rt2x00debug_fop_queue_dump);
if (IS_ERR(intf->frame_dump_entry))
goto exit;
#include "rt2x00lib.h"
#include "rt2x00dump.h"
-/*
- * Ring handler.
- */
-struct data_ring *rt2x00lib_get_ring(struct rt2x00_dev *rt2x00dev,
- const unsigned int queue)
-{
- int beacon = test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags);
-
- /*
- * Check if we are requesting a reqular TX ring,
- * or if we are requesting a Beacon or Atim ring.
- * For Atim rings, we should check if it is supported.
- */
- if (queue < rt2x00dev->hw->queues && rt2x00dev->tx)
- return &rt2x00dev->tx[queue];
-
- if (!rt2x00dev->bcn || !beacon)
- return NULL;
-
- if (queue == IEEE80211_TX_QUEUE_BEACON)
- return &rt2x00dev->bcn[0];
- else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON)
- return &rt2x00dev->bcn[1];
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(rt2x00lib_get_ring);
-
/*
* Link tuning handlers
*/
cancel_delayed_work_sync(&rt2x00dev->link.work);
}
-/*
- * Ring initialization
- */
-static void rt2x00lib_init_rxrings(struct rt2x00_dev *rt2x00dev)
-{
- struct data_ring *ring = rt2x00dev->rx;
- unsigned int i;
-
- if (!rt2x00dev->ops->lib->init_rxentry)
- return;
-
- if (ring->data_addr)
- memset(ring->data_addr, 0, rt2x00_get_ring_size(ring));
-
- for (i = 0; i < ring->stats.limit; i++)
- rt2x00dev->ops->lib->init_rxentry(rt2x00dev, &ring->entry[i]);
-
- rt2x00_ring_index_clear(ring);
-}
-
-static void rt2x00lib_init_txrings(struct rt2x00_dev *rt2x00dev)
-{
- struct data_ring *ring;
- unsigned int i;
-
- if (!rt2x00dev->ops->lib->init_txentry)
- return;
-
- txringall_for_each(rt2x00dev, ring) {
- if (ring->data_addr)
- memset(ring->data_addr, 0, rt2x00_get_ring_size(ring));
-
- for (i = 0; i < ring->stats.limit; i++)
- rt2x00dev->ops->lib->init_txentry(rt2x00dev,
- &ring->entry[i]);
-
- rt2x00_ring_index_clear(ring);
- }
-}
-
/*
* Radio control handlers.
*/
return 0;
/*
- * Initialize all data rings.
+ * Initialize all data queues.
*/
- rt2x00lib_init_rxrings(rt2x00dev);
- rt2x00lib_init_txrings(rt2x00dev);
+ rt2x00queue_init_rx(rt2x00dev);
+ rt2x00queue_init_tx(rt2x00dev);
/*
* Enable radio.
{
struct rt2x00_dev *rt2x00dev =
container_of(work, struct rt2x00_dev, beacon_work);
- struct data_ring *ring =
- rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON);
- struct data_entry *entry = rt2x00_get_data_entry(ring);
+ struct ieee80211_tx_control control;
struct sk_buff *skb;
skb = ieee80211_beacon_get(rt2x00dev->hw,
- rt2x00dev->interface.id,
- &entry->tx_status.control);
+ rt2x00dev->interface.id, &control);
if (!skb)
return;
- rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, skb,
- &entry->tx_status.control);
+ rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, skb, &control);
dev_kfree_skb(skb);
}
}
EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
-void rt2x00lib_txdone(struct data_entry *entry,
- const int status, const int retry)
+void rt2x00lib_txdone(struct queue_entry *entry,
+ struct txdone_entry_desc *txdesc)
{
- struct rt2x00_dev *rt2x00dev = entry->ring->rt2x00dev;
- struct ieee80211_tx_status *tx_status = &entry->tx_status;
- struct ieee80211_low_level_stats *stats = &rt2x00dev->low_level_stats;
- int success = !!(status == TX_SUCCESS || status == TX_SUCCESS_RETRY);
- int fail = !!(status == TX_FAIL_RETRY || status == TX_FAIL_INVALID ||
- status == TX_FAIL_OTHER);
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct ieee80211_tx_status tx_status;
+ int success = !!(txdesc->status == TX_SUCCESS ||
+ txdesc->status == TX_SUCCESS_RETRY);
+ int fail = !!(txdesc->status == TX_FAIL_RETRY ||
+ txdesc->status == TX_FAIL_INVALID ||
+ txdesc->status == TX_FAIL_OTHER);
/*
* Update TX statistics.
*/
- tx_status->flags = 0;
- tx_status->ack_signal = 0;
- tx_status->excessive_retries = (status == TX_FAIL_RETRY);
- tx_status->retry_count = retry;
rt2x00dev->link.qual.tx_success += success;
- rt2x00dev->link.qual.tx_failed += retry + fail;
+ rt2x00dev->link.qual.tx_failed += txdesc->retry + fail;
- if (!(tx_status->control.flags & IEEE80211_TXCTL_NO_ACK)) {
+ /*
+ * Initialize TX status
+ */
+ tx_status.flags = 0;
+ tx_status.ack_signal = 0;
+ tx_status.excessive_retries = (txdesc->status == TX_FAIL_RETRY);
+ tx_status.retry_count = txdesc->retry;
+ memcpy(&tx_status.control, txdesc->control, sizeof(tx_status.control));
+
+ if (!(tx_status.control.flags & IEEE80211_TXCTL_NO_ACK)) {
if (success)
- tx_status->flags |= IEEE80211_TX_STATUS_ACK;
+ tx_status.flags |= IEEE80211_TX_STATUS_ACK;
else
- stats->dot11ACKFailureCount++;
+ rt2x00dev->low_level_stats.dot11ACKFailureCount++;
}
- tx_status->queue_length = entry->ring->stats.limit;
- tx_status->queue_number = tx_status->control.queue;
+ tx_status.queue_length = entry->queue->limit;
+ tx_status.queue_number = tx_status.control.queue;
- if (tx_status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) {
+ if (tx_status.control.flags & IEEE80211_TXCTL_USE_RTS_CTS) {
if (success)
- stats->dot11RTSSuccessCount++;
+ rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
else
- stats->dot11RTSFailureCount++;
+ rt2x00dev->low_level_stats.dot11RTSFailureCount++;
}
/*
* Send the tx_status to mac80211 & debugfs.
* mac80211 will clean up the skb structure.
*/
- get_skb_desc(entry->skb)->frame_type = DUMP_FRAME_TXDONE;
+ get_skb_frame_desc(entry->skb)->frame_type = DUMP_FRAME_TXDONE;
rt2x00debug_dump_frame(rt2x00dev, entry->skb);
- ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb, tx_status);
+ ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb, &tx_status);
entry->skb = NULL;
}
EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
-void rt2x00lib_rxdone(struct data_entry *entry, struct sk_buff *skb,
- struct rxdata_entry_desc *desc)
+void rt2x00lib_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
{
- struct rt2x00_dev *rt2x00dev = entry->ring->rt2x00dev;
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status;
struct ieee80211_hw_mode *mode;
struct ieee80211_rate *rate;
* the signal is the PLCP value. If it was received with
* a CCK bitrate the signal is the rate in 0.5kbit/s.
*/
- if (!desc->ofdm)
+ if (!rxdesc->ofdm)
val = DEVICE_GET_RATE_FIELD(rate->val, RATE);
else
val = DEVICE_GET_RATE_FIELD(rate->val, PLCP);
- if (val == desc->signal) {
+ if (val == rxdesc->signal) {
val = rate->val;
break;
}
/*
* Only update link status if this is a beacon frame carrying our bssid.
*/
- hdr = (struct ieee80211_hdr*)skb->data;
+ hdr = (struct ieee80211_hdr*)entry->skb->data;
fc = le16_to_cpu(hdr->frame_control);
- if (is_beacon(fc) && desc->my_bss)
- rt2x00lib_update_link_stats(&rt2x00dev->link, desc->rssi);
+ if (is_beacon(fc) && rxdesc->my_bss)
+ rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc->rssi);
rt2x00dev->link.qual.rx_success++;
rx_status->rate = val;
rx_status->signal =
- rt2x00lib_calculate_link_signal(rt2x00dev, desc->rssi);
- rx_status->ssi = desc->rssi;
- rx_status->flag = desc->flags;
+ rt2x00lib_calculate_link_signal(rt2x00dev, rxdesc->rssi);
+ rx_status->ssi = rxdesc->rssi;
+ rx_status->flag = rxdesc->flags;
rx_status->antenna = rt2x00dev->link.ant.active.rx;
/*
- * Send frame to mac80211 & debugfs
+ * Send frame to mac80211 & debugfs.
+ * mac80211 will clean up the skb structure.
*/
- get_skb_desc(skb)->frame_type = DUMP_FRAME_RXDONE;
- rt2x00debug_dump_frame(rt2x00dev, skb);
- ieee80211_rx_irqsafe(rt2x00dev->hw, skb, rx_status);
+ get_skb_frame_desc(entry->skb)->frame_type = DUMP_FRAME_RXDONE;
+ rt2x00debug_dump_frame(rt2x00dev, entry->skb);
+ ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb, rx_status);
+ entry->skb = NULL;
}
EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
struct sk_buff *skb,
struct ieee80211_tx_control *control)
{
- struct txdata_entry_desc desc;
- struct skb_desc *skbdesc = get_skb_desc(skb);
- struct ieee80211_hdr *ieee80211hdr = skbdesc->data;
+ struct txentry_desc txdesc;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+ struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
int tx_rate;
int bitrate;
int length;
u16 frame_control;
u16 seq_ctrl;
- memset(&desc, 0, sizeof(desc));
+ memset(&txdesc, 0, sizeof(txdesc));
- desc.cw_min = skbdesc->ring->tx_params.cw_min;
- desc.cw_max = skbdesc->ring->tx_params.cw_max;
- desc.aifs = skbdesc->ring->tx_params.aifs;
+ txdesc.cw_min = skbdesc->entry->queue->cw_min;
+ txdesc.cw_max = skbdesc->entry->queue->cw_max;
+ txdesc.aifs = skbdesc->entry->queue->aifs;
/*
* Identify queue
*/
if (control->queue < rt2x00dev->hw->queues)
- desc.queue = control->queue;
+ txdesc.queue = control->queue;
else if (control->queue == IEEE80211_TX_QUEUE_BEACON ||
control->queue == IEEE80211_TX_QUEUE_AFTER_BEACON)
- desc.queue = QUEUE_MGMT;
+ txdesc.queue = QID_MGMT;
else
- desc.queue = QUEUE_OTHER;
+ txdesc.queue = QID_OTHER;
/*
* Read required fields from ieee80211 header.
* Check whether this frame is to be acked
*/
if (!(control->flags & IEEE80211_TXCTL_NO_ACK))
- __set_bit(ENTRY_TXD_ACK, &desc.flags);
+ __set_bit(ENTRY_TXD_ACK, &txdesc.flags);
/*
* Check if this is a RTS/CTS frame
*/
if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) {
- __set_bit(ENTRY_TXD_BURST, &desc.flags);
+ __set_bit(ENTRY_TXD_BURST, &txdesc.flags);
if (is_rts_frame(frame_control)) {
- __set_bit(ENTRY_TXD_RTS_FRAME, &desc.flags);
- __set_bit(ENTRY_TXD_ACK, &desc.flags);
+ __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc.flags);
+ __set_bit(ENTRY_TXD_ACK, &txdesc.flags);
} else
- __clear_bit(ENTRY_TXD_ACK, &desc.flags);
+ __clear_bit(ENTRY_TXD_ACK, &txdesc.flags);
if (control->rts_cts_rate)
tx_rate = control->rts_cts_rate;
}
* Check for OFDM
*/
if (DEVICE_GET_RATE_FIELD(tx_rate, RATEMASK) & DEV_OFDM_RATEMASK)
- __set_bit(ENTRY_TXD_OFDM_RATE, &desc.flags);
+ __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc.flags);
/*
* Check if more fragments are pending
*/
if (ieee80211_get_morefrag(ieee80211hdr)) {
- __set_bit(ENTRY_TXD_BURST, &desc.flags);
- __set_bit(ENTRY_TXD_MORE_FRAG, &desc.flags);
+ __set_bit(ENTRY_TXD_BURST, &txdesc.flags);
+ __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc.flags);
}
/*
*/
if (control->queue == IEEE80211_TX_QUEUE_BEACON ||
is_probe_resp(frame_control))
- __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc.flags);
+ __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc.flags);
/*
* Determine with what IFS priority this frame should be send.
* or this fragment came after RTS/CTS.
*/
if ((seq_ctrl & IEEE80211_SCTL_FRAG) > 0 ||
- test_bit(ENTRY_TXD_RTS_FRAME, &desc.flags))
- desc.ifs = IFS_SIFS;
+ test_bit(ENTRY_TXD_RTS_FRAME, &txdesc.flags))
+ txdesc.ifs = IFS_SIFS;
else
- desc.ifs = IFS_BACKOFF;
+ txdesc.ifs = IFS_BACKOFF;
/*
* PLCP setup
* Length calculation depends on OFDM/CCK rate.
*/
- desc.signal = DEVICE_GET_RATE_FIELD(tx_rate, PLCP);
- desc.service = 0x04;
+ txdesc.signal = DEVICE_GET_RATE_FIELD(tx_rate, PLCP);
+ txdesc.service = 0x04;
- length = skbdesc->data_len + FCS_LEN;
- if (test_bit(ENTRY_TXD_OFDM_RATE, &desc.flags)) {
- desc.length_high = (length >> 6) & 0x3f;
- desc.length_low = length & 0x3f;
+ length = skb->len + FCS_LEN;
+ if (test_bit(ENTRY_TXD_OFDM_RATE, &txdesc.flags)) {
+ txdesc.length_high = (length >> 6) & 0x3f;
+ txdesc.length_low = length & 0x3f;
} else {
bitrate = DEVICE_GET_RATE_FIELD(tx_rate, RATE);
* Check if we need to set the Length Extension
*/
if (bitrate == 110 && residual <= 30)
- desc.service |= 0x80;
+ txdesc.service |= 0x80;
}
- desc.length_high = (duration >> 8) & 0xff;
- desc.length_low = duration & 0xff;
+ txdesc.length_high = (duration >> 8) & 0xff;
+ txdesc.length_low = duration & 0xff;
/*
* When preamble is enabled we should set the
* preamble bit for the signal.
*/
if (DEVICE_GET_RATE_FIELD(tx_rate, PREAMBLE))
- desc.signal |= 0x08;
+ txdesc.signal |= 0x08;
}
- rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, skb, &desc, control);
+ rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, skb, &txdesc, control);
/*
- * Update ring entry.
+ * Update queue entry.
*/
skbdesc->entry->skb = skb;
- memcpy(&skbdesc->entry->tx_status.control, control, sizeof(*control));
/*
* The frame has been completely initialized and ready
/*
* Initialization/uninitialization handlers.
*/
-static int rt2x00lib_alloc_entries(struct data_ring *ring,
- const u16 max_entries, const u16 data_size,
- const u16 desc_size)
-{
- struct data_entry *entry;
- unsigned int i;
-
- ring->stats.limit = max_entries;
- ring->data_size = data_size;
- ring->desc_size = desc_size;
-
- /*
- * Allocate all ring entries.
- */
- entry = kzalloc(ring->stats.limit * sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- for (i = 0; i < ring->stats.limit; i++) {
- entry[i].flags = 0;
- entry[i].ring = ring;
- entry[i].skb = NULL;
- entry[i].entry_idx = i;
- }
-
- ring->entry = entry;
-
- return 0;
-}
-
-static int rt2x00lib_alloc_ring_entries(struct rt2x00_dev *rt2x00dev)
-{
- struct data_ring *ring;
-
- /*
- * Allocate the RX ring.
- */
- if (rt2x00lib_alloc_entries(rt2x00dev->rx, RX_ENTRIES, DATA_FRAME_SIZE,
- rt2x00dev->ops->rxd_size))
- return -ENOMEM;
-
- /*
- * First allocate the TX rings.
- */
- txring_for_each(rt2x00dev, ring) {
- if (rt2x00lib_alloc_entries(ring, TX_ENTRIES, DATA_FRAME_SIZE,
- rt2x00dev->ops->txd_size))
- return -ENOMEM;
- }
-
- if (!test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags))
- return 0;
-
- /*
- * Allocate the BEACON ring.
- */
- if (rt2x00lib_alloc_entries(&rt2x00dev->bcn[0], BEACON_ENTRIES,
- MGMT_FRAME_SIZE, rt2x00dev->ops->txd_size))
- return -ENOMEM;
-
- /*
- * Allocate the Atim ring.
- */
- if (rt2x00lib_alloc_entries(&rt2x00dev->bcn[1], ATIM_ENTRIES,
- DATA_FRAME_SIZE, rt2x00dev->ops->txd_size))
- return -ENOMEM;
-
- return 0;
-}
-
-static void rt2x00lib_free_ring_entries(struct rt2x00_dev *rt2x00dev)
-{
- struct data_ring *ring;
-
- ring_for_each(rt2x00dev, ring) {
- kfree(ring->entry);
- ring->entry = NULL;
- }
-}
-
static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
{
if (!__test_and_clear_bit(DEVICE_INITIALIZED, &rt2x00dev->flags))
rt2x00dev->ops->lib->uninitialize(rt2x00dev);
/*
- * Free allocated ring entries.
+ * Free allocated queue entries.
*/
- rt2x00lib_free_ring_entries(rt2x00dev);
+ rt2x00queue_uninitialize(rt2x00dev);
}
static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
return 0;
/*
- * Allocate all ring entries.
+ * Allocate all queue entries.
*/
- status = rt2x00lib_alloc_ring_entries(rt2x00dev);
- if (status) {
- ERROR(rt2x00dev, "Ring entries allocation failed.\n");
+ status = rt2x00queue_initialize(rt2x00dev);
+ if (status)
return status;
- }
/*
* Initialize the device.
*/
status = rt2x00rfkill_register(rt2x00dev);
if (status)
- goto exit_unitialize;
+ goto exit;
return 0;
-exit_unitialize:
- rt2x00lib_uninitialize(rt2x00dev);
-
exit:
- rt2x00lib_free_ring_entries(rt2x00dev);
+ rt2x00lib_uninitialize(rt2x00dev);
return status;
}
/*
* driver allocation handlers.
*/
-static int rt2x00lib_alloc_rings(struct rt2x00_dev *rt2x00dev)
-{
- struct data_ring *ring;
- unsigned int index;
-
- /*
- * We need the following rings:
- * RX: 1
- * TX: hw->queues
- * Beacon: 1 (if required)
- * Atim: 1 (if required)
- */
- rt2x00dev->data_rings = 1 + rt2x00dev->hw->queues +
- (2 * test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags));
-
- ring = kzalloc(rt2x00dev->data_rings * sizeof(*ring), GFP_KERNEL);
- if (!ring) {
- ERROR(rt2x00dev, "Ring allocation failed.\n");
- return -ENOMEM;
- }
-
- /*
- * Initialize pointers
- */
- rt2x00dev->rx = ring;
- rt2x00dev->tx = &rt2x00dev->rx[1];
- if (test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags))
- rt2x00dev->bcn = &rt2x00dev->tx[rt2x00dev->hw->queues];
-
- /*
- * Initialize ring parameters.
- * RX: queue_idx = 0
- * TX: queue_idx = IEEE80211_TX_QUEUE_DATA0 + index
- * TX: cw_min: 2^5 = 32.
- * TX: cw_max: 2^10 = 1024.
- */
- rt2x00dev->rx->rt2x00dev = rt2x00dev;
- rt2x00dev->rx->queue_idx = 0;
-
- index = IEEE80211_TX_QUEUE_DATA0;
- txring_for_each(rt2x00dev, ring) {
- ring->rt2x00dev = rt2x00dev;
- ring->queue_idx = index++;
- ring->tx_params.aifs = 2;
- ring->tx_params.cw_min = 5;
- ring->tx_params.cw_max = 10;
- }
-
- return 0;
-}
-
-static void rt2x00lib_free_rings(struct rt2x00_dev *rt2x00dev)
-{
- kfree(rt2x00dev->rx);
- rt2x00dev->rx = NULL;
- rt2x00dev->tx = NULL;
- rt2x00dev->bcn = NULL;
-}
-
int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
int retval = -ENOMEM;
rt2x00dev->interface.type = IEEE80211_IF_TYPE_INVALID;
/*
- * Allocate ring array.
+ * Allocate queue array.
*/
- retval = rt2x00lib_alloc_rings(rt2x00dev);
+ retval = rt2x00queue_allocate(rt2x00dev);
if (retval)
goto exit;
rt2x00lib_free_firmware(rt2x00dev);
/*
- * Free ring structures.
+ * Free queue structures.
*/
- rt2x00lib_free_rings(rt2x00dev);
+ rt2x00queue_free(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
* @chip_rf: RF chipset
* @chip_rev: Chipset revision
* @type: The frame type (&rt2x00_dump_type)
- * @ring_index: The index number of the data ring.
- * @entry_index: The index number of the entry inside the data ring.
+ * @queue_index: The index number of the data queue.
+ * @entry_index: The index number of the entry inside the data queue.
* @timestamp_sec: Timestamp - seconds
* @timestamp_usec: Timestamp - microseconds
*/
__le32 chip_rev;
__le16 type;
- __u8 ring_index;
+ __u8 queue_index;
__u8 entry_index;
__le32 timestamp_sec;
void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf, const int force_config);
+/*
+ * Queue handlers.
+ */
+void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev);
+void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev);
+int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev);
+void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev);
+int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev);
+void rt2x00queue_free(struct rt2x00_dev *rt2x00dev);
+
/*
* Firmware handlers.
*/
#include "rt2x00lib.h"
static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring,
+ struct data_queue *queue,
struct sk_buff *frag_skb,
struct ieee80211_tx_control *control)
{
frag_skb->data, frag_skb->len, control,
(struct ieee80211_rts *)(skb->data));
- if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, ring, skb, control)) {
+ if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb, control)) {
WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
return NETDEV_TX_BUSY;
}
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
- struct data_ring *ring;
+ struct data_queue *queue;
u16 frame_control;
/*
}
/*
- * Determine which ring to put packet on.
+ * Determine which queue to put packet on.
*/
- ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
- if (unlikely(!ring)) {
+ queue = rt2x00queue_get_queue(rt2x00dev, control->queue);
+ if (unlikely(!queue)) {
ERROR(rt2x00dev,
"Attempt to send packet over invalid queue %d.\n"
"Please file bug report to %s.\n",
if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) &&
(control->flags & (IEEE80211_TXCTL_USE_RTS_CTS |
IEEE80211_TXCTL_USE_CTS_PROTECT))) {
- if (rt2x00_ring_free(ring) <= 1) {
+ if (rt2x00queue_available(queue) <= 1) {
ieee80211_stop_queue(rt2x00dev->hw, control->queue);
return NETDEV_TX_BUSY;
}
- if (rt2x00mac_tx_rts_cts(rt2x00dev, ring, skb, control)) {
+ if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb, control)) {
ieee80211_stop_queue(rt2x00dev->hw, control->queue);
return NETDEV_TX_BUSY;
}
}
- if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, ring, skb, control)) {
+ if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb, control)) {
ieee80211_stop_queue(rt2x00dev->hw, control->queue);
return NETDEV_TX_BUSY;
}
- if (rt2x00_ring_full(ring))
+ if (rt2x00queue_full(queue))
ieee80211_stop_queue(rt2x00dev->hw, control->queue);
if (rt2x00dev->ops->lib->kick_tx_queue)
!is_interface_present(intf))
return;
- intf->id = 0;
+ intf->id = NULL;
intf->type = IEEE80211_IF_TYPE_INVALID;
memset(&intf->bssid, 0x00, ETH_ALEN);
memset(&intf->mac, 0x00, ETH_ALEN);
struct rt2x00_dev *rt2x00dev = hw->priv;
unsigned int i;
- for (i = 0; i < hw->queues; i++)
- memcpy(&stats->data[i], &rt2x00dev->tx[i].stats,
- sizeof(rt2x00dev->tx[i].stats));
+ for (i = 0; i < hw->queues; i++) {
+ stats->data[i].len = rt2x00dev->tx[i].length;
+ stats->data[i].limit = rt2x00dev->tx[i].limit;
+ stats->data[i].count = rt2x00dev->tx[i].count;
+ }
return 0;
}
}
EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
-int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue,
+int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct data_ring *ring;
+ struct data_queue *queue;
- ring = rt2x00lib_get_ring(rt2x00dev, queue);
- if (unlikely(!ring))
+ queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+ if (unlikely(!queue))
return -EINVAL;
/*
* Ralink registers require to know the bit number 'n'.
*/
if (params->cw_min)
- ring->tx_params.cw_min = fls(params->cw_min);
+ queue->cw_min = fls(params->cw_min);
else
- ring->tx_params.cw_min = 5; /* cw_min: 2^5 = 32. */
+ queue->cw_min = 5; /* cw_min: 2^5 = 32. */
if (params->cw_max)
- ring->tx_params.cw_max = fls(params->cw_max);
+ queue->cw_max = fls(params->cw_max);
else
- ring->tx_params.cw_max = 10; /* cw_min: 2^10 = 1024. */
+ queue->cw_max = 10; /* cw_min: 2^10 = 1024. */
if (params->aifs)
- ring->tx_params.aifs = params->aifs;
+ queue->aifs = params->aifs;
else
- ring->tx_params.aifs = 2;
+ queue->aifs = 2;
INFO(rt2x00dev,
- "Configured TX ring %d - CWmin: %d, CWmax: %d, Aifs: %d.\n",
- queue, ring->tx_params.cw_min, ring->tx_params.cw_max,
- ring->tx_params.aifs);
+ "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d.\n",
+ queue_idx, queue->cw_min, queue->cw_max, queue->aifs);
return 0;
}
struct ieee80211_tx_control *control)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct skb_desc *desc;
- struct data_ring *ring;
- struct data_entry *entry;
+ struct queue_entry_priv_pci_tx *priv_tx;
+ struct skb_frame_desc *skbdesc;
+ struct data_queue *queue;
+ struct queue_entry *entry;
/*
* Just in case mac80211 doesn't set this correctly,
* initialization.
*/
control->queue = IEEE80211_TX_QUEUE_BEACON;
- ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
- entry = rt2x00_get_data_entry(ring);
+ queue = rt2x00queue_get_queue(rt2x00dev, control->queue);
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ priv_tx = entry->priv_data;
/*
* Fill in skb descriptor
*/
- desc = get_skb_desc(skb);
- desc->desc_len = ring->desc_size;
- desc->data_len = skb->len;
- desc->desc = entry->priv;
- desc->data = skb->data;
- desc->ring = ring;
- desc->entry = entry;
-
- memcpy(entry->data_addr, skb->data, skb->len);
+ skbdesc = get_skb_frame_desc(skb);
+ memset(skbdesc, 0, sizeof(*skbdesc));
+ skbdesc->data = skb->data;
+ skbdesc->data_len = queue->data_size;
+ skbdesc->desc = priv_tx->desc;
+ skbdesc->desc_len = queue->desc_size;
+ skbdesc->entry = entry;
+
+ memcpy(priv_tx->data, skb->data, skb->len);
rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
/*
* TX data handlers.
*/
int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring, struct sk_buff *skb,
+ struct data_queue *queue, struct sk_buff *skb,
struct ieee80211_tx_control *control)
{
- struct data_entry *entry = rt2x00_get_data_entry(ring);
- __le32 *txd = entry->priv;
- struct skb_desc *desc;
+ struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
+ struct skb_frame_desc *skbdesc;
u32 word;
- if (rt2x00_ring_full(ring))
+ if (rt2x00queue_full(queue))
return -EINVAL;
- rt2x00_desc_read(txd, 0, &word);
+ rt2x00_desc_read(priv_tx->desc, 0, &word);
if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
/*
* Fill in skb descriptor
*/
- desc = get_skb_desc(skb);
- desc->desc_len = ring->desc_size;
- desc->data_len = skb->len;
- desc->desc = entry->priv;
- desc->data = skb->data;
- desc->ring = ring;
- desc->entry = entry;
-
- memcpy(entry->data_addr, skb->data, skb->len);
+ skbdesc = get_skb_frame_desc(skb);
+ memset(skbdesc, 0, sizeof(*skbdesc));
+ skbdesc->data = skb->data;
+ skbdesc->data_len = queue->data_size;
+ skbdesc->desc = priv_tx->desc;
+ skbdesc->desc_len = queue->desc_size;
+ skbdesc->entry = entry;
+
+ memcpy(priv_tx->data, skb->data, skb->len);
rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
- rt2x00_ring_index_inc(ring);
+ rt2x00queue_index_inc(queue, Q_INDEX);
return 0;
}
*/
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
- struct data_ring *ring = rt2x00dev->rx;
- struct data_entry *entry;
- struct sk_buff *skb;
+ struct data_queue *queue = rt2x00dev->rx;
+ struct queue_entry *entry;
+ struct queue_entry_priv_pci_rx *priv_rx;
struct ieee80211_hdr *hdr;
- struct skb_desc *skbdesc;
- struct rxdata_entry_desc desc;
+ struct skb_frame_desc *skbdesc;
+ struct rxdone_entry_desc rxdesc;
int header_size;
- __le32 *rxd;
int align;
u32 word;
while (1) {
- entry = rt2x00_get_data_entry(ring);
- rxd = entry->priv;
- rt2x00_desc_read(rxd, 0, &word);
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ priv_rx = entry->priv_data;
+ rt2x00_desc_read(priv_rx->desc, 0, &word);
if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
break;
- memset(&desc, 0, sizeof(desc));
- rt2x00dev->ops->lib->fill_rxdone(entry, &desc);
+ memset(&rxdesc, 0, sizeof(rxdesc));
+ rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
- hdr = (struct ieee80211_hdr *)entry->data_addr;
+ hdr = (struct ieee80211_hdr *)priv_rx->data;
header_size =
ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
* Allocate the sk_buffer, initialize it and copy
* all data into it.
*/
- skb = dev_alloc_skb(desc.size + align);
- if (!skb)
+ entry->skb = dev_alloc_skb(rxdesc.size + align);
+ if (!entry->skb)
return;
- skb_reserve(skb, align);
- memcpy(skb_put(skb, desc.size), entry->data_addr, desc.size);
+ skb_reserve(entry->skb, align);
+ memcpy(skb_put(entry->skb, rxdesc.size),
+ priv_rx->data, rxdesc.size);
/*
* Fill in skb descriptor
*/
- skbdesc = get_skb_desc(skb);
- skbdesc->desc_len = entry->ring->desc_size;
- skbdesc->data_len = skb->len;
- skbdesc->desc = entry->priv;
- skbdesc->data = skb->data;
- skbdesc->ring = ring;
+ skbdesc = get_skb_frame_desc(entry->skb);
+ memset(skbdesc, 0, sizeof(*skbdesc));
+ skbdesc->data = entry->skb->data;
+ skbdesc->data_len = queue->data_size;
+ skbdesc->desc = priv_rx->desc;
+ skbdesc->desc_len = queue->desc_size;
skbdesc->entry = entry;
/*
* Send the frame to rt2x00lib for further processing.
*/
- rt2x00lib_rxdone(entry, skb, &desc);
+ rt2x00lib_rxdone(entry, &rxdesc);
- if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) {
+ if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
- rt2x00_desc_write(rxd, 0, word);
+ rt2x00_desc_write(priv_rx->desc, 0, word);
}
- rt2x00_ring_index_inc(ring);
+ rt2x00queue_index_inc(queue, Q_INDEX);
}
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
-void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct data_entry *entry,
- const int tx_status, const int retry)
+void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
+ struct txdone_entry_desc *txdesc)
{
+ struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
u32 word;
- rt2x00lib_txdone(entry, tx_status, retry);
+ txdesc->control = &priv_tx->control;
+ rt2x00lib_txdone(entry, txdesc);
/*
* Make this entry available for reuse.
*/
entry->flags = 0;
- rt2x00_desc_read(entry->priv, 0, &word);
+ rt2x00_desc_read(priv_tx->desc, 0, &word);
rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
- rt2x00_desc_write(entry->priv, 0, word);
+ rt2x00_desc_write(priv_tx->desc, 0, word);
- rt2x00_ring_index_done_inc(entry->ring);
+ rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
/*
- * If the data ring was full before the txdone handler
+ * If the data queue was full before the txdone handler
* we must make sure the packet queue in the mac80211 stack
* is reenabled when the txdone handler has finished.
*/
- if (!rt2x00_ring_full(entry->ring))
- ieee80211_wake_queue(rt2x00dev->hw,
- entry->tx_status.control.queue);
+ if (!rt2x00queue_full(entry->queue))
+ ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
/*
* Device initialization handlers.
*/
-#define priv_offset(__ring, __i) \
-({ \
- ring->data_addr + (i * ring->desc_size); \
+#define dma_size(__queue) \
+({ \
+ (__queue)->limit * \
+ ((__queue)->desc_size + (__queue)->data_size);\
})
-#define data_addr_offset(__ring, __i) \
-({ \
- (__ring)->data_addr + \
- ((__ring)->stats.limit * (__ring)->desc_size) + \
- ((__i) * (__ring)->data_size); \
+#define priv_offset(__queue, __base, __i) \
+({ \
+ (__base) + ((__i) * (__queue)->desc_size); \
})
-#define data_dma_offset(__ring, __i) \
-({ \
- (__ring)->data_dma + \
- ((__ring)->stats.limit * (__ring)->desc_size) + \
- ((__i) * (__ring)->data_size); \
+#define data_addr_offset(__queue, __base, __i) \
+({ \
+ (__base) + \
+ ((__queue)->limit * (__queue)->desc_size) + \
+ ((__i) * (__queue)->data_size); \
})
-static int rt2x00pci_alloc_dma(struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring)
+#define data_dma_offset(__queue, __base, __i) \
+({ \
+ (__base) + \
+ ((__queue)->limit * (__queue)->desc_size) + \
+ ((__i) * (__queue)->data_size); \
+})
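+
+/*
+ * Rough sketch of the single DMA allocation these offset macros assume:
+ * all descriptors come first, followed by all data buffers, so for a
+ * queue with 'limit' entries, entry i finds its descriptor at
+ * base + (i * desc_size) and its data buffer at
+ * base + (limit * desc_size) + (i * data_size).
+ */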
+
+static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
{
+ struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
+ struct queue_entry_priv_pci_tx *priv_tx;
+ void *data_addr;
+ dma_addr_t data_dma;
unsigned int i;
/*
* Allocate DMA memory for descriptor and buffer.
*/
- ring->data_addr = pci_alloc_consistent(rt2x00dev_pci(rt2x00dev),
- rt2x00_get_ring_size(ring),
- &ring->data_dma);
- if (!ring->data_addr)
+ data_addr = pci_alloc_consistent(pci_dev, dma_size(queue), &data_dma);
+ if (!data_addr)
return -ENOMEM;
/*
- * Initialize all ring entries to contain valid
- * addresses.
+ * Initialize all queue entries to contain valid addresses.
*/
- for (i = 0; i < ring->stats.limit; i++) {
- ring->entry[i].priv = priv_offset(ring, i);
- ring->entry[i].data_addr = data_addr_offset(ring, i);
- ring->entry[i].data_dma = data_dma_offset(ring, i);
+ for (i = 0; i < queue->limit; i++) {
+ priv_tx = queue->entries[i].priv_data;
+ priv_tx->desc = priv_offset(queue, data_addr, i);
+ priv_tx->data = data_addr_offset(queue, data_addr, i);
+ priv_tx->dma = data_dma_offset(queue, data_dma, i);
}
return 0;
}
-static void rt2x00pci_free_dma(struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring)
+static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
{
- if (ring->data_addr)
- pci_free_consistent(rt2x00dev_pci(rt2x00dev),
- rt2x00_get_ring_size(ring),
- ring->data_addr, ring->data_dma);
- ring->data_addr = NULL;
+ struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
+ struct queue_entry_priv_pci_tx *priv_tx = queue->entries[0].priv_data;
+
+ if (priv_tx->data)
+ pci_free_consistent(pci_dev, dma_size(queue),
+ priv_tx->data, priv_tx->dma);
+ priv_tx->data = NULL;
}
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
- struct data_ring *ring;
+ struct data_queue *queue;
int status;
/*
* Allocate DMA
*/
- ring_for_each(rt2x00dev, ring) {
- status = rt2x00pci_alloc_dma(rt2x00dev, ring);
+ queue_for_each(rt2x00dev, queue) {
+ status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
if (status)
goto exit;
}
void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
- struct data_ring *ring;
+ struct data_queue *queue;
/*
* Free irq line.
/*
* Free DMA
*/
- ring_for_each(rt2x00dev, ring)
- rt2x00pci_free_dma(rt2x00dev, ring);
+ queue_for_each(rt2x00dev, queue)
+ rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
*/
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION("rt2x00 library");
+MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");
* TX data handlers.
*/
int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring, struct sk_buff *skb,
+ struct data_queue *queue, struct sk_buff *skb,
struct ieee80211_tx_control *control);
-/*
- * RX/TX data handlers.
+/**
+ * struct queue_entry_priv_pci_rx: Per RX entry PCI specific information
+ *
+ * @desc: Pointer to device descriptor.
+ * @data: Pointer to device's entry memory.
+ * @dma: DMA pointer to &data.
+ */
+struct queue_entry_priv_pci_rx {
+ __le32 *desc;
+
+ void *data;
+ dma_addr_t dma;
+};
+
+/**
+ * struct queue_entry_priv_pci_tx: Per TX entry PCI specific information
+ *
+ * @desc: Pointer to device descriptor
+ * @data: Pointer to device's entry memory.
+ * @dma: DMA pointer to &data.
+ * @control: mac80211 control structure used to transmit data.
+ */
+struct queue_entry_priv_pci_tx {
+ __le32 *desc;
+
+ void *data;
+ dma_addr_t dma;
+
+ struct ieee80211_tx_control control;
+};
+
+/**
+ * rt2x00pci_rxdone - Handle RX done events
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
*/
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
-void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct data_entry *entry,
- const int tx_status, const int retry);
+
+/**
+ * rt2x00pci_txdone - Handle TX done events
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+ * @entry: Entry which has completed the transmission of a frame.
+ * @desc: TX done descriptor
+ */
+void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
+ struct txdone_entry_desc *desc);
/*
* Device initialization handlers.
--- /dev/null
+/*
+ Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
+ <http://rt2x00.serialmonkey.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: rt2x00lib
+ Abstract: rt2x00 queue specific routines.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "rt2x00.h"
+#include "rt2x00lib.h"
+
+struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
+ const enum ieee80211_tx_queue queue)
+{
+ int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+
+ if (queue < rt2x00dev->hw->queues && rt2x00dev->tx)
+ return &rt2x00dev->tx[queue];
+
+ if (!rt2x00dev->bcn)
+ return NULL;
+
+ if (queue == IEEE80211_TX_QUEUE_BEACON)
+ return &rt2x00dev->bcn[0];
+ else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON && atim)
+ return &rt2x00dev->bcn[1];
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
+
+struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
+ enum queue_index index)
+{
+ struct queue_entry *entry;
+
+ if (unlikely(index >= Q_INDEX_MAX)) {
+ ERROR(queue->rt2x00dev,
+ "Entry requested from invalid index type (%d)\n", index);
+ return NULL;
+ }
+
+ spin_lock(&queue->lock);
+
+ entry = &queue->entries[queue->index[index]];
+
+ spin_unlock(&queue->lock);
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
+
+void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
+{
+ if (unlikely(index >= Q_INDEX_MAX)) {
+ ERROR(queue->rt2x00dev,
+ "Index change on invalid index type (%d)\n", index);
+ return;
+ }
+
+ spin_lock(&queue->lock);
+
+ queue->index[index]++;
+ if (queue->index[index] >= queue->limit)
+ queue->index[index] = 0;
+
+ if (index == Q_INDEX) {
+ queue->length++;
+ } else if (index == Q_INDEX_DONE) {
+ queue->length--;
+ queue->count++;
+ }
+
+ spin_unlock(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_index_inc);
+
+static void rt2x00queue_reset(struct data_queue *queue)
+{
+ spin_lock(&queue->lock);
+
+ queue->count = 0;
+ queue->length = 0;
+ memset(queue->index, 0, sizeof(queue->index));
+
+ spin_unlock(&queue->lock);
+}
+
+void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue = rt2x00dev->rx;
+ unsigned int i;
+
+ rt2x00queue_reset(queue);
+
+ if (!rt2x00dev->ops->lib->init_rxentry)
+ return;
+
+ for (i = 0; i < queue->limit; i++)
+ rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
+ &queue->entries[i]);
+}
+
+void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ unsigned int i;
+
+ txall_queue_for_each(rt2x00dev, queue) {
+ rt2x00queue_reset(queue);
+
+ if (!rt2x00dev->ops->lib->init_txentry)
+ continue;
+
+ for (i = 0; i < queue->limit; i++)
+ rt2x00dev->ops->lib->init_txentry(rt2x00dev,
+ &queue->entries[i]);
+ }
+}
+
+static int rt2x00queue_alloc_entries(struct data_queue *queue,
+ const struct data_queue_desc *qdesc)
+{
+ struct queue_entry *entries;
+ unsigned int entry_size;
+ unsigned int i;
+
+ rt2x00queue_reset(queue);
+
+ queue->limit = qdesc->entry_num;
+ queue->data_size = qdesc->data_size;
+ queue->desc_size = qdesc->desc_size;
+
+ /*
+ * Allocate all queue entries.
+ */
+ entry_size = sizeof(*entries) + qdesc->priv_size;
+ entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
+ ( ((char *)(__base)) + ((__limit) * (__esize)) + ((__index) * (__psize)) )
+
+ for (i = 0; i < queue->limit; i++) {
+ entries[i].flags = 0;
+ entries[i].queue = queue;
+ entries[i].skb = NULL;
+ entries[i].entry_idx = i;
+ entries[i].priv_data =
+ QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
+ sizeof(*entries), qdesc->priv_size);
+ }
+
+#undef QUEUE_ENTRY_PRIV_OFFSET
+
+ queue->entries = entries;
+
+ return 0;
+}
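+
+/*
+ * Note on the allocation above (illustrative): the queue_entry array and
+ * the per-entry private data share one kzalloc'ed block, laid out as
+ * [entry 0 .. entry limit-1][priv 0 .. priv limit-1], which is what
+ * QUEUE_ENTRY_PRIV_OFFSET derives the priv_data pointers from.
+ */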
+
+int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ int status;
+
+ status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
+ if (status)
+ goto exit;
+
+ tx_queue_for_each(rt2x00dev, queue) {
+ status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
+ if (status)
+ goto exit;
+ }
+
+ status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
+ if (status)
+ goto exit;
+
+ if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
+ return 0;
+
+ status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
+ rt2x00dev->ops->atim);
+ if (status)
+ goto exit;
+
+ return 0;
+
+exit:
+ ERROR(rt2x00dev, "Queue entries allocation failed.\n");
+
+ rt2x00queue_uninitialize(rt2x00dev);
+
+ return status;
+}
+
+void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ queue_for_each(rt2x00dev, queue) {
+ kfree(queue->entries);
+ queue->entries = NULL;
+ }
+}
+
+int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ enum data_queue_qid qid;
+ unsigned int req_atim =
+ !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+
+ /*
+ * We need the following queues:
+ * RX: 1
+ * TX: hw->queues
+ * Beacon: 1
+ * Atim: 1 (if required)
+ */
+ rt2x00dev->data_queues = 2 + rt2x00dev->hw->queues + req_atim;
+
+ queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ ERROR(rt2x00dev, "Queue allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Initialize pointers
+ */
+ rt2x00dev->rx = queue;
+ rt2x00dev->tx = &queue[1];
+ rt2x00dev->bcn = &queue[1 + rt2x00dev->hw->queues];
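+
+ /*
+ * The resulting array layout is (sketch): queue[0] = RX,
+ * queue[1] .. queue[hw->queues] = TX, queue[1 + hw->queues] = beacon
+ * and, when the ATIM queue is required, queue[2 + hw->queues] = atim.
+ */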
+
+ /*
+ * Initialize queue parameters.
+ * RX: qid = QID_RX
+ * TX: qid = QID_AC_BE + index
+ * TX: cw_min: 2^5 = 32.
+ * TX: cw_max: 2^10 = 1024.
+ * BCN & Atim: qid = QID_MGMT
+ */
+ qid = QID_AC_BE;
+ queue_for_each(rt2x00dev, queue) {
+ spin_lock_init(&queue->lock);
+
+ queue->rt2x00dev = rt2x00dev;
+ queue->qid = qid++;
+ queue->aifs = 2;
+ queue->cw_min = 5;
+ queue->cw_max = 10;
+ }
+
+ /*
+ * Fix non-TX data qids
+ */
+ rt2x00dev->rx->qid = QID_RX;
+ rt2x00dev->bcn[0].qid = QID_MGMT;
+ if (req_atim)
+ rt2x00dev->bcn[1].qid = QID_MGMT;
+
+ return 0;
+}
+
+void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
+{
+ kfree(rt2x00dev->rx);
+ rt2x00dev->rx = NULL;
+ rt2x00dev->tx = NULL;
+ rt2x00dev->bcn = NULL;
+}
--- /dev/null
+/*
+ Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
+ <http://rt2x00.serialmonkey.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: rt2x00
+ Abstract: rt2x00 queue datastructures and routines
+ */
+
+#ifndef RT2X00QUEUE_H
+#define RT2X00QUEUE_H
+
+#include <linux/prefetch.h>
+
+/**
+ * DOC: Entire frame size
+ *
+ * Ralink PCI devices demand the frame size to be a multiple of 128 bytes;
+ * for USB devices this restriction does not apply, but the value of
+ * 2432 makes sense since it is big enough to contain the maximum fragment
+ * size according to the IEEE 802.11 specs.
+ */
+#define DATA_FRAME_SIZE 2432
+#define MGMT_FRAME_SIZE 256
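+/* 2432 = 19 * 128, so DATA_FRAME_SIZE satisfies the multiple-of-128 requirement above. */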
+
+/**
+ * DOC: Number of entries per queue
+ *
+ * After research it was concluded that 12 entries in an RX or TX
+ * queue would be sufficient. Although this is almost one third of
+ * the amount the legacy driver allocated, the queues aren't getting
+ * filled to the maximum even when working with the maximum rate.
+ *
+ * FIXME: For virtual interfaces we need a different number
+ * of beacons, since more interfaces require more beacons.
+ */
+#define RX_ENTRIES 12
+#define TX_ENTRIES 12
+#define BEACON_ENTRIES 1
+#define ATIM_ENTRIES 1
+
+/**
+ * enum data_queue_qid: Queue identification
+ */
+enum data_queue_qid {
+ QID_AC_BE = 0,
+ QID_AC_BK = 1,
+ QID_AC_VI = 2,
+ QID_AC_VO = 3,
+ QID_HCCA = 4,
+ QID_MGMT = 13,
+ QID_RX = 14,
+ QID_OTHER = 15,
+};
+
+/**
+ * struct skb_frame_desc: Descriptor information for the skb buffer
+ *
+ * This structure is placed over the skb->cb array, this means that
+ * this structure should not exceed the size of that array (48 bytes).
+ *
+ * @flags: Frame flags.
+ * @frame_type: Frame type, see &enum rt2x00_dump_type.
+ * @data: Pointer to data part of frame (Start of ieee80211 header).
+ * @desc: Pointer to descriptor part of the frame.
+ * Note that this pointer could point to something outside
+ * of the scope of the skb->data pointer.
+ * @data_len: Length of the frame data.
+ * @desc_len: Length of the frame descriptor.
+ *
+ * @entry: The entry to which this sk buffer belongs.
+ */
+struct skb_frame_desc {
+ unsigned int flags;
+
+ unsigned int frame_type;
+
+ void *data;
+ void *desc;
+
+ unsigned int data_len;
+ unsigned int desc_len;
+
+ struct queue_entry *entry;
+};
+
+static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct skb_frame_desc) > sizeof(skb->cb));
+ return (struct skb_frame_desc *)&skb->cb[0];
+}
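+
+/*
+ * Typical use, mirroring the PCI and USB TX paths elsewhere in this patch
+ * (sketch only):
+ *
+ * skbdesc = get_skb_frame_desc(skb);
+ * memset(skbdesc, 0, sizeof(*skbdesc));
+ * skbdesc->data = skb->data;
+ * skbdesc->desc = priv_tx->desc;
+ * skbdesc->entry = entry;
+ */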
+
+/**
+ * struct rxdone_entry_desc: RX Entry descriptor
+ *
+ * Summary of information that has been read from the RX frame descriptor.
+ *
+ * @signal: Signal of the received frame.
+ * @rssi: RSSI of the received frame.
+ * @ofdm: Was frame sent with an OFDM rate.
+ * @size: Data size of the received frame.
+ * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
+ * @my_bss: Does this frame originate from the device's BSS.
+ */
+struct rxdone_entry_desc {
+ int signal;
+ int rssi;
+ int ofdm;
+ int size;
+ int flags;
+ int my_bss;
+};
+
+/**
+ * struct txdone_entry_desc: TX done entry descriptor
+ *
+ * Summary of information that has been read from the TX frame descriptor
+ * after the device is done with transmission.
+ *
+ * @control: Control structure which was used to transmit the frame.
+ * @status: TX status (See &enum tx_status).
+ * @retry: Retry count.
+ */
+struct txdone_entry_desc {
+ struct ieee80211_tx_control *control;
+ int status;
+ int retry;
+};
+
+/**
+ * enum txentry_desc_flags: Status flags for TX entry descriptor
+ *
+ * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
+ * @ENTRY_TXD_OFDM_RATE: This frame is sent out with an OFDM rate.
+ * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
+ * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
+ * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
+ * @ENTRY_TXD_ACK: An ACK is required for this frame.
+ */
+enum txentry_desc_flags {
+ ENTRY_TXD_RTS_FRAME,
+ ENTRY_TXD_OFDM_RATE,
+ ENTRY_TXD_MORE_FRAG,
+ ENTRY_TXD_REQ_TIMESTAMP,
+ ENTRY_TXD_BURST,
+ ENTRY_TXD_ACK,
+};
+
+/**
+ * struct txentry_desc: TX Entry descriptor
+ *
+ * Summary of information for the frame descriptor before sending a TX frame.
+ *
+ * @flags: Descriptor flags (See &enum txentry_desc_flags).
+ * @queue: Queue identification (See &enum data_queue_qid).
+ * @length_high: PLCP length high word.
+ * @length_low: PLCP length low word.
+ * @signal: PLCP signal.
+ * @service: PLCP service.
+ * @aifs: AIFS value.
+ * @ifs: IFS value.
+ * @cw_min: cwmin value.
+ * @cw_max: cwmax value.
+ */
+struct txentry_desc {
+ unsigned long flags;
+
+ enum data_queue_qid queue;
+
+ u16 length_high;
+ u16 length_low;
+ u16 signal;
+ u16 service;
+
+ int aifs;
+ int ifs;
+ int cw_min;
+ int cw_max;
+};
+
+/**
+ * enum queue_entry_flags: Status flags for queue entry
+ *
+ * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
+ * As long as this bit is set, this entry may only be touched
+ * through the interface structure.
+ * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
+ * transfer (either TX or RX depending on the queue). The entry should
+ * only be touched after the device has signaled it is done with it.
+ * @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data
+ * encryption or decryption. The entry should only be touched after
+ * the device has signaled it is done with it.
+ */
+enum queue_entry_flags {
+ ENTRY_BCN_ASSIGNED,
+ ENTRY_OWNER_DEVICE_DATA,
+ ENTRY_OWNER_DEVICE_CRYPTO,
+};
+
+/**
+ * struct queue_entry: Entry inside the &struct data_queue
+ *
+ * @flags: Entry flags, see &enum queue_entry_flags.
+ * @queue: The data queue (&struct data_queue) to which this entry belongs.
+ * @skb: The buffer which is currently being transmitted (for TX queue),
+ * or used to directly receive data in (for RX queue).
+ * @entry_idx: The entry index number.
+ * @priv_data: Private data belonging to this queue entry. The pointer
+ * points to data specific to a particular driver and queue type.
+ */
+struct queue_entry {
+ unsigned long flags;
+
+ struct data_queue *queue;
+
+ struct sk_buff *skb;
+
+ unsigned int entry_idx;
+
+ void *priv_data;
+};
+
+/**
+ * enum queue_index: Queue index type
+ *
+ * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
+ * owned by the hardware then the queue is considered to be full.
+ * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
+ * the hardware and for which we need to run the txdone handler. If this
+ * entry is not owned by the hardware the queue is considered to be empty.
+ * @Q_INDEX_CRYPTO: Index pointer to the next entry for which encryption or
+ * decryption will be completed by the hardware.
+ * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
+ * of the index array.
+ */
+enum queue_index {
+ Q_INDEX,
+ Q_INDEX_DONE,
+ Q_INDEX_CRYPTO,
+ Q_INDEX_MAX,
+};
+
+/**
+ * struct data_queue: Data queue
+ *
+ * @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to.
+ * @entries: Base address of the &struct queue_entry which are
+ * part of this queue.
+ * @qid: The queue identification, see &enum data_queue_qid.
+ * @lock: Spinlock to protect index handling. Whenever any entry of the
+ * @index array needs to be changed this lock should be grabbed to prevent
+ * index corruption due to concurrency.
+ * @count: Number of frames handled in the queue.
+ * @limit: Maximum number of entries in the queue.
+ * @length: Number of frames in queue.
+ * @index: Index pointers to entry positions in the queue,
+ * use &enum queue_index to get a specific index field.
+ * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
+ * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
+ * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
+ * @data_size: Maximum data size for the frames in this queue.
+ * @desc_size: Hardware descriptor size for the data in this queue.
+ */
+struct data_queue {
+ struct rt2x00_dev *rt2x00dev;
+ struct queue_entry *entries;
+
+ enum data_queue_qid qid;
+
+ spinlock_t lock;
+ unsigned int count;
+ unsigned short limit;
+ unsigned short length;
+ unsigned short index[Q_INDEX_MAX];
+
+ unsigned short aifs;
+ unsigned short cw_min;
+ unsigned short cw_max;
+
+ unsigned short data_size;
+ unsigned short desc_size;
+};
+
+/**
+ * struct data_queue_desc: Data queue description
+ *
+ * The information in this structure is used by drivers
+ * to inform rt2x00lib about the creation of the data queue.
+ *
+ * @entry_num: Maximum number of entries for a queue.
+ * @data_size: Maximum data size for the frames in this queue.
+ * @desc_size: Hardware descriptor size for the data in this queue.
+ * @priv_size: Size of per-queue_entry private data.
+ */
+struct data_queue_desc {
+ unsigned short entry_num;
+ unsigned short data_size;
+ unsigned short desc_size;
+ unsigned short priv_size;
+};
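+
+/*
+ * A driver typically provides constant instances of this structure through
+ * its ops structure (rt2x00dev->ops->rx and friends), roughly like the
+ * sketch below; the name and desc_size are illustrative, not taken from a
+ * specific driver:
+ *
+ * static const struct data_queue_desc example_queue_rx = {
+ * .entry_num = RX_ENTRIES,
+ * .data_size = DATA_FRAME_SIZE,
+ * .desc_size = RXD_DESC_SIZE,
+ * .priv_size = sizeof(struct queue_entry_priv_pci_rx),
+ * };
+ */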
+
+/**
+ * queue_end - Return pointer to the end of the queue array (HELPER MACRO).
+ * @__dev: Pointer to &struct rt2x00_dev
+ *
+ * Using the base rx pointer and the maximum number of available queues,
+ * this macro will return the address of 1 position beyond the end of the
+ * queues array.
+ */
+#define queue_end(__dev) \
+ &(__dev)->rx[(__dev)->data_queues]
+
+/**
+ * tx_queue_end - Return pointer to the end of the TX queue array (HELPER MACRO).
+ * @__dev: Pointer to &struct rt2x00_dev
+ *
+ * Using the base tx pointer and the maximum number of available TX
+ * queues, this macro will return the address of 1 position beyond
+ * the end of the TX queue array.
+ */
+#define tx_queue_end(__dev) \
+ &(__dev)->tx[(__dev)->hw->queues]
+
+/**
+ * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
+ * @__entry: Pointer where the current queue entry will be stored in.
+ * @__start: Start queue pointer.
+ * @__end: End queue pointer.
+ *
+ * This macro will loop through all queues between &__start and &__end.
+ */
+#define queue_loop(__entry, __start, __end) \
+ for ((__entry) = (__start); \
+ prefetch(&(__entry)[1]), (__entry) != (__end); \
+ (__entry) = &(__entry)[1])
+
+/**
+ * queue_for_each - Loop through all queues
+ * @__dev: Pointer to &struct rt2x00_dev
+ * @__entry: Pointer where the current queue entry will be stored in.
+ *
+ * This macro will loop through all available queues.
+ */
+#define queue_for_each(__dev, __entry) \
+ queue_loop(__entry, (__dev)->rx, queue_end(__dev))
+
+/**
+ * tx_queue_for_each - Loop through the TX queues
+ * @__dev: Pointer to &struct rt2x00_dev
+ * @__entry: Pointer where the current queue entry will be stored in.
+ *
+ * This macro will loop through all TX related queues excluding
+ * the Beacon and Atim queues.
+ */
+#define tx_queue_for_each(__dev, __entry) \
+ queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))
+
+/**
+ * txall_queue_for_each - Loop through all TX related queues
+ * @__dev: Pointer to &struct rt2x00_dev
+ * @__entry: Pointer where the current queue entry will be stored in.
+ *
+ * This macro will loop through all TX related queues including
+ * the Beacon and Atim queues.
+ */
+#define txall_queue_for_each(__dev, __entry) \
+ queue_loop(__entry, (__dev)->tx, queue_end(__dev))
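+
+/*
+ * For example, rt2x00queue_init_tx() in rt2x00queue.c walks every TX
+ * related queue, including the beacon and atim queues, roughly as:
+ *
+ * struct data_queue *queue;
+ *
+ * txall_queue_for_each(rt2x00dev, queue)
+ * rt2x00queue_reset(queue);
+ */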
+
+/**
+ * rt2x00queue_empty - Check if the queue is empty.
+ * @queue: Queue to check if empty.
+ */
+static inline int rt2x00queue_empty(struct data_queue *queue)
+{
+ return queue->length == 0;
+}
+
+/**
+ * rt2x00queue_full - Check if the queue is full.
+ * @queue: Queue to check if full.
+ */
+static inline int rt2x00queue_full(struct data_queue *queue)
+{
+ return queue->length == queue->limit;
+}
+
+/**
+ * rt2x00queue_available - Check the number of available entries in queue.
+ * @queue: Queue to check.
+ */
+static inline int rt2x00queue_available(struct data_queue *queue)
+{
+ return queue->limit - queue->length;
+}
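+
+/*
+ * Illustrative TX-path usage of the helpers above, mirroring
+ * rt2x00pci_write_tx_data() (a sketch):
+ *
+ * if (rt2x00queue_full(queue))
+ * return -EINVAL;
+ * entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ * ... fill the entry and write the TX descriptor ...
+ * rt2x00queue_index_inc(queue, Q_INDEX);
+ *
+ * The txdone handler later advances Q_INDEX_DONE, which frees one slot
+ * (rt2x00queue_available() grows by one) and increments the frame counter.
+ */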
+
+/**
+ * rt2x00_desc_read - Read a word from the hardware descriptor.
+ * @desc: Base descriptor address
+ * @word: Word index from where the descriptor should be read.
+ * @value: Address where the descriptor value should be written into.
+ */
+static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
+{
+ *value = le32_to_cpu(desc[word]);
+}
+
+/**
+ * rt2x00_desc_write - Write a word to the hardware descriptor.
+ * @desc: Base descriptor address
+ * @word: Word index to which the descriptor should be written.
+ * @value: Value that should be written into the descriptor.
+ */
+static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
+{
+ desc[word] = cpu_to_le32(value);
+}
+
+#endif /* RT2X00QUEUE_H */
/*
* TX result flags.
*/
-enum TX_STATUS {
+enum tx_status {
TX_SUCCESS = 0,
TX_SUCCESS_RETRY = 1,
TX_FAIL_RETRY = 2,
+++ /dev/null
-/*
- Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
- <http://rt2x00.serialmonkey.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- Module: rt2x00
- Abstract: rt2x00 ring datastructures and routines
- */
-
-#ifndef RT2X00RING_H
-#define RT2X00RING_H
-
-/*
- * skb_desc
- * Descriptor information for the skb buffer
- */
-struct skb_desc {
- unsigned int frame_type;
-
- unsigned int desc_len;
- unsigned int data_len;
-
- void *desc;
- void *data;
-
- struct data_ring *ring;
- struct data_entry *entry;
-};
-
-static inline struct skb_desc* get_skb_desc(struct sk_buff *skb)
-{
- return (struct skb_desc*)&skb->cb[0];
-}
-
-/*
- * rxdata_entry_desc
- * Summary of information that has been read from the
- * RX frame descriptor.
- */
-struct rxdata_entry_desc {
- int signal;
- int rssi;
- int ofdm;
- int size;
- int flags;
- int my_bss;
-};
-
-/*
- * txdata_entry_desc
- * Summary of information that should be written into the
- * descriptor for sending a TX frame.
- */
-struct txdata_entry_desc {
- unsigned long flags;
-#define ENTRY_TXDONE 1
-#define ENTRY_TXD_RTS_FRAME 2
-#define ENTRY_TXD_OFDM_RATE 3
-#define ENTRY_TXD_MORE_FRAG 4
-#define ENTRY_TXD_REQ_TIMESTAMP 5
-#define ENTRY_TXD_BURST 6
-#define ENTRY_TXD_ACK 7
-
-/*
- * Queue ID. ID's 0-4 are data TX rings
- */
- int queue;
-#define QUEUE_MGMT 13
-#define QUEUE_RX 14
-#define QUEUE_OTHER 15
-
- /*
- * PLCP values.
- */
- u16 length_high;
- u16 length_low;
- u16 signal;
- u16 service;
-
- /*
- * Timing information
- */
- int aifs;
- int ifs;
- int cw_min;
- int cw_max;
-};
-
-/*
- * data_entry
- * The data ring is a list of data entries.
- * Each entry holds a reference to the descriptor
- * and the data buffer. For TX rings the reference to the
- * sk_buff of the packet being transmitted is also stored here.
- */
-struct data_entry {
- /*
- * Status flags
- */
- unsigned long flags;
-#define ENTRY_OWNER_NIC 1
-
- /*
- * Ring we belong to.
- */
- struct data_ring *ring;
-
- /*
- * sk_buff for the packet which is being transmitted
- * in this entry (Only used with TX related rings).
- */
- struct sk_buff *skb;
-
- /*
- * Store a ieee80211_tx_status structure in each
- * ring entry, this will optimize the txdone
- * handler.
- */
- struct ieee80211_tx_status tx_status;
-
- /*
- * private pointer specific to driver.
- */
- void *priv;
-
- /*
- * Data address for this entry.
- */
- void *data_addr;
- dma_addr_t data_dma;
-
- /*
- * Entry identification number (index).
- */
- unsigned int entry_idx;
-};
-
-/*
- * data_ring
- * Data rings are used by the device to send and receive packets.
- * The data_addr is the base address of the data memory.
- * To determine at which point in the ring we are,
- * have to use the rt2x00_ring_index_*() functions.
- */
-struct data_ring {
- /*
- * Pointer to main rt2x00dev structure where this
- * ring belongs to.
- */
- struct rt2x00_dev *rt2x00dev;
-
- /*
- * Base address for the device specific data entries.
- */
- struct data_entry *entry;
-
- /*
- * TX queue statistic info.
- */
- struct ieee80211_tx_queue_stats_data stats;
-
- /*
- * TX Queue parameters.
- */
- struct ieee80211_tx_queue_params tx_params;
-
- /*
- * Base address for data ring.
- */
- dma_addr_t data_dma;
- void *data_addr;
-
- /*
- * Queue identification number:
- * RX: 0
- * TX: IEEE80211_TX_*
- */
- unsigned int queue_idx;
-
- /*
- * Index variables.
- */
- u16 index;
- u16 index_done;
-
- /*
- * Size of packet and descriptor in bytes.
- */
- u16 data_size;
- u16 desc_size;
-};
-
-/*
- * Handlers to determine the address of the current device specific
- * data entry, where either index or index_done points to.
- */
-static inline struct data_entry *rt2x00_get_data_entry(struct data_ring *ring)
-{
- return &ring->entry[ring->index];
-}
-
-static inline struct data_entry *rt2x00_get_data_entry_done(struct data_ring
- *ring)
-{
- return &ring->entry[ring->index_done];
-}
-
-/*
- * Total ring memory
- */
-static inline int rt2x00_get_ring_size(struct data_ring *ring)
-{
- return ring->stats.limit * (ring->desc_size + ring->data_size);
-}
-
-/*
- * Ring index manipulation functions.
- */
-static inline void rt2x00_ring_index_inc(struct data_ring *ring)
-{
- ring->index++;
- if (ring->index >= ring->stats.limit)
- ring->index = 0;
- ring->stats.len++;
-}
-
-static inline void rt2x00_ring_index_done_inc(struct data_ring *ring)
-{
- ring->index_done++;
- if (ring->index_done >= ring->stats.limit)
- ring->index_done = 0;
- ring->stats.len--;
- ring->stats.count++;
-}
-
-static inline void rt2x00_ring_index_clear(struct data_ring *ring)
-{
- ring->index = 0;
- ring->index_done = 0;
- ring->stats.len = 0;
- ring->stats.count = 0;
-}
-
-static inline int rt2x00_ring_empty(struct data_ring *ring)
-{
- return ring->stats.len == 0;
-}
-
-static inline int rt2x00_ring_full(struct data_ring *ring)
-{
- return ring->stats.len == ring->stats.limit;
-}
-
-static inline int rt2x00_ring_free(struct data_ring *ring)
-{
- return ring->stats.limit - ring->stats.len;
-}
-
-/*
- * TX/RX Descriptor access functions.
- */
-static inline void rt2x00_desc_read(__le32 *desc,
- const u8 word, u32 *value)
-{
- *value = le32_to_cpu(desc[word]);
-}
-
-static inline void rt2x00_desc_write(__le32 *desc,
- const u8 word, const u32 value)
-{
- desc[word] = cpu_to_le32(value);
-}
-
-#endif /* RT2X00RING_H */
void *buffer, const u16 buffer_length,
const int timeout)
{
- struct usb_device *usb_dev =
- interface_to_usbdev(rt2x00dev_usb(rt2x00dev));
+ struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
int status;
unsigned int i;
unsigned int pipe =
*/
static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
- struct data_entry *entry = (struct data_entry *)urb->context;
- struct data_ring *ring = entry->ring;
- struct rt2x00_dev *rt2x00dev = ring->rt2x00dev;
+ struct queue_entry *entry = (struct queue_entry *)urb->context;
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
+ struct txdone_entry_desc txdesc;
__le32 *txd = (__le32 *)entry->skb->data;
u32 word;
- int tx_status;
if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
- !__test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags))
+ !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;
rt2x00_desc_read(txd, 0, &word);
/*
* Remove the descriptor data from the buffer.
*/
- skb_pull(entry->skb, ring->desc_size);
+ skb_pull(entry->skb, entry->queue->desc_size);
/*
* Obtain the status about this packet.
*/
- tx_status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY;
+ txdesc.status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY;
+ txdesc.retry = 0;
+ txdesc.control = &priv_tx->control;
- rt2x00lib_txdone(entry, tx_status, 0);
+ rt2x00lib_txdone(entry, &txdesc);
/*
* Make this entry available for reuse.
*/
entry->flags = 0;
- rt2x00_ring_index_done_inc(entry->ring);
+ rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
/*
- * If the data ring was full before the txdone handler
+ * If the data queue was full before the txdone handler
* we must make sure the packet queue in the mac80211 stack
* is reenabled when the txdone handler has finished.
*/
- if (!rt2x00_ring_full(ring))
- ieee80211_wake_queue(rt2x00dev->hw,
- entry->tx_status.control.queue);
+ if (!rt2x00queue_full(entry->queue))
+ ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
}
int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring, struct sk_buff *skb,
+ struct data_queue *queue, struct sk_buff *skb,
struct ieee80211_tx_control *control)
{
- struct usb_device *usb_dev =
- interface_to_usbdev(rt2x00dev_usb(rt2x00dev));
- struct data_entry *entry = rt2x00_get_data_entry(ring);
- struct skb_desc *desc;
+ struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
+ struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
+ struct skb_frame_desc *skbdesc;
u32 length;
- if (rt2x00_ring_full(ring))
+ if (rt2x00queue_full(queue))
return -EINVAL;
- if (test_bit(ENTRY_OWNER_NIC, &entry->flags)) {
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
ERROR(rt2x00dev,
"Arrived at non-free entry in the non-full queue %d.\n"
"Please file bug report to %s.\n",
/*
* Add the descriptor in front of the skb.
*/
- skb_push(skb, ring->desc_size);
- memset(skb->data, 0, ring->desc_size);
+ skb_push(skb, queue->desc_size);
+ memset(skb->data, 0, queue->desc_size);
/*
* Fill in skb descriptor
*/
- desc = get_skb_desc(skb);
- desc->desc_len = ring->desc_size;
- desc->data_len = skb->len - ring->desc_size;
- desc->desc = skb->data;
- desc->data = skb->data + ring->desc_size;
- desc->ring = ring;
- desc->entry = entry;
+ skbdesc = get_skb_frame_desc(skb);
+ memset(skbdesc, 0, sizeof(*skbdesc));
+ skbdesc->data = skb->data + queue->desc_size;
+ skbdesc->data_len = queue->data_size;
+ skbdesc->desc = skb->data;
+ skbdesc->desc_len = queue->desc_size;
+ skbdesc->entry = entry;
rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
/*
* Initialize URB and send the frame to the device.
*/
- __set_bit(ENTRY_OWNER_NIC, &entry->flags);
- usb_fill_bulk_urb(entry->priv, usb_dev, usb_sndbulkpipe(usb_dev, 1),
+ __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+ usb_fill_bulk_urb(priv_tx->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
skb->data, length, rt2x00usb_interrupt_txdone, entry);
- usb_submit_urb(entry->priv, GFP_ATOMIC);
+ usb_submit_urb(priv_tx->urb, GFP_ATOMIC);
- rt2x00_ring_index_inc(ring);
+ rt2x00queue_index_inc(queue, Q_INDEX);
return 0;
}
/*
* RX data handlers.
*/
+static struct sk_buff* rt2x00usb_alloc_rxskb(struct data_queue *queue)
+{
+ struct sk_buff *skb;
+ unsigned int frame_size;
+
+ /*
+ * As alignment we use 2 and not NET_IP_ALIGN because we need
+ * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN
+ * can be 0 on some hardware). We use these 2 bytes for frame
+ * alignment later; we assume that the chance that
+ * header_size % 4 == 2 is bigger than header_size % 2 == 0
+ * and thus optimize alignment by reserving the 2 bytes in
+ * advance.
+ */
+ frame_size = queue->data_size + queue->desc_size;
+ skb = dev_alloc_skb(frame_size + 2);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, 2);
+ skb_put(skb, frame_size);
+
+ return skb;
+}
+
static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
- struct data_entry *entry = (struct data_entry *)urb->context;
- struct data_ring *ring = entry->ring;
- struct rt2x00_dev *rt2x00dev = ring->rt2x00dev;
+ struct queue_entry *entry = (struct queue_entry *)urb->context;
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- struct skb_desc *skbdesc;
- struct rxdata_entry_desc desc;
- int header_size;
- int frame_size;
+ struct skb_frame_desc *skbdesc;
+ struct rxdone_entry_desc rxdesc;
if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
- !test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags))
+ !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;
/*
* to be actually valid, or if the urb is signaling
* a problem.
*/
- if (urb->actual_length < entry->ring->desc_size || urb->status)
+ if (urb->actual_length < entry->queue->desc_size || urb->status)
goto skip_entry;
/*
* Fill in skb descriptor
*/
- skbdesc = get_skb_desc(entry->skb);
- skbdesc->ring = ring;
+ skbdesc = get_skb_frame_desc(entry->skb);
+ memset(skbdesc, 0, sizeof(*skbdesc));
skbdesc->entry = entry;
- memset(&desc, 0, sizeof(desc));
- rt2x00dev->ops->lib->fill_rxdone(entry, &desc);
+ memset(&rxdesc, 0, sizeof(rxdesc));
+ rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
/*
* Allocate a new sk buffer to replace the current one.
* If allocation fails, we should drop the current frame
* so we can recycle the existing sk buffer for the new frame.
- * As alignment we use 2 and not NET_IP_ALIGN because we need
- * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN
- * can be 0 on some hardware). We use these 2 bytes for frame
- * alignment later, we assume that the chance that
- * header_size % 4 == 2 is bigger then header_size % 2 == 0
- * and thus optimize alignment by reserving the 2 bytes in
- * advance.
*/
- frame_size = entry->ring->data_size + entry->ring->desc_size;
- skb = dev_alloc_skb(frame_size + 2);
+ skb = rt2x00usb_alloc_rxskb(entry->queue);
if (!skb)
goto skip_entry;
- skb_reserve(skb, 2);
- skb_put(skb, frame_size);
-
- /*
- * The data behind the ieee80211 header must be
- * aligned on a 4 byte boundary.
- */
- hdr = (struct ieee80211_hdr *)entry->skb->data;
- header_size =
- ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
-
- if (header_size % 4 == 0) {
- skb_push(entry->skb, 2);
- memmove(entry->skb->data, entry->skb->data + 2, skb->len - 2);
- }
-
- /*
- * Trim the entire buffer down to only contain the valid frame data
- * excluding the device descriptor. The position of the descriptor
- * varies. This means that we should check where the descriptor is
- * and decide if we need to pull the data pointer to exclude the
- * device descriptor.
- */
- if (skbdesc->data > skbdesc->desc)
- skb_pull(entry->skb, skbdesc->desc_len);
- skb_trim(entry->skb, desc.size);
-
/*
* Send the frame to rt2x00lib for further processing.
*/
- rt2x00lib_rxdone(entry, entry->skb, &desc);
+ rt2x00lib_rxdone(entry, &rxdesc);
/*
* Replace current entry's skb with the newly allocated one,
urb->transfer_buffer_length = entry->skb->len;
skip_entry:
- if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) {
- __set_bit(ENTRY_OWNER_NIC, &entry->flags);
+ if (test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) {
+ __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
usb_submit_urb(urb, GFP_ATOMIC);
}
- rt2x00_ring_index_inc(ring);
+ rt2x00queue_index_inc(entry->queue, Q_INDEX);
}
/*
*/
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
- struct data_ring *ring;
+ struct queue_entry_priv_usb_rx *priv_rx;
+ struct queue_entry_priv_usb_tx *priv_tx;
+ struct data_queue *queue;
unsigned int i;
rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000,
REGISTER_TIMEOUT);
/*
- * Cancel all rings.
+ * Cancel all queues.
*/
- ring_for_each(rt2x00dev, ring) {
- for (i = 0; i < ring->stats.limit; i++)
- usb_kill_urb(ring->entry[i].priv);
+ for (i = 0; i < rt2x00dev->rx->limit; i++) {
+ priv_rx = rt2x00dev->rx->entries[i].priv_data;
+ usb_kill_urb(priv_rx->urb);
+ }
+
+ txall_queue_for_each(rt2x00dev, queue) {
+ for (i = 0; i < queue->limit; i++) {
+ priv_tx = queue->entries[i].priv_data;
+ usb_kill_urb(priv_tx->urb);
+ }
}
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
* Device initialization handlers.
*/
void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry)
+ struct queue_entry *entry)
{
- struct usb_device *usb_dev =
- interface_to_usbdev(rt2x00dev_usb(rt2x00dev));
+ struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
+ struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data;
- usb_fill_bulk_urb(entry->priv, usb_dev,
+ usb_fill_bulk_urb(priv_rx->urb, usb_dev,
usb_rcvbulkpipe(usb_dev, 1),
entry->skb->data, entry->skb->len,
rt2x00usb_interrupt_rxdone, entry);
- __set_bit(ENTRY_OWNER_NIC, &entry->flags);
- usb_submit_urb(entry->priv, GFP_ATOMIC);
+ __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+ usb_submit_urb(priv_rx->urb, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry)
+ struct queue_entry *entry)
{
entry->flags = 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring)
+ struct data_queue *queue)
{
+ struct queue_entry_priv_usb_rx *priv_rx;
+ struct queue_entry_priv_usb_tx *priv_tx;
+ struct queue_entry_priv_usb_bcn *priv_bcn;
+ struct urb *urb;
+ unsigned int guardian =
+ test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
unsigned int i;
/*
* Allocate the URB's
*/
- for (i = 0; i < ring->stats.limit; i++) {
- ring->entry[i].priv = usb_alloc_urb(0, GFP_KERNEL);
- if (!ring->entry[i].priv)
+ for (i = 0; i < queue->limit; i++) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
return -ENOMEM;
+
+ if (queue->qid == QID_RX) {
+ priv_rx = queue->entries[i].priv_data;
+ priv_rx->urb = urb;
+ } else if (queue->qid == QID_MGMT && guardian) {
+ priv_bcn = queue->entries[i].priv_data;
+ priv_bcn->urb = urb;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
+ priv_bcn->guardian_urb = urb;
+ } else {
+ priv_tx = queue->entries[i].priv_data;
+ priv_tx->urb = urb;
+ }
}
return 0;
}
static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring)
+ struct data_queue *queue)
{
+ struct queue_entry_priv_usb_rx *priv_rx;
+ struct queue_entry_priv_usb_tx *priv_tx;
+ struct queue_entry_priv_usb_bcn *priv_bcn;
+ struct urb *urb;
+ unsigned int guardian =
+ test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
unsigned int i;
- if (!ring->entry)
+ if (!queue->entries)
return;
- for (i = 0; i < ring->stats.limit; i++) {
- usb_kill_urb(ring->entry[i].priv);
- usb_free_urb(ring->entry[i].priv);
- if (ring->entry[i].skb)
- kfree_skb(ring->entry[i].skb);
+ for (i = 0; i < queue->limit; i++) {
+ if (queue->qid == QID_RX) {
+ priv_rx = queue->entries[i].priv_data;
+ urb = priv_rx->urb;
+ } else if (queue->qid == QID_MGMT && guardian) {
+ priv_bcn = queue->entries[i].priv_data;
+
+ usb_kill_urb(priv_bcn->guardian_urb);
+ usb_free_urb(priv_bcn->guardian_urb);
+
+ urb = priv_bcn->urb;
+ } else {
+ priv_tx = queue->entries[i].priv_data;
+ urb = priv_tx->urb;
+ }
+
+ usb_kill_urb(urb);
+ usb_free_urb(urb);
+ if (queue->entries[i].skb)
+ kfree_skb(queue->entries[i].skb);
}
}
int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
- struct data_ring *ring;
+ struct data_queue *queue;
struct sk_buff *skb;
unsigned int entry_size;
unsigned int i;
/*
* Allocate DMA
*/
- ring_for_each(rt2x00dev, ring) {
- status = rt2x00usb_alloc_urb(rt2x00dev, ring);
+ queue_for_each(rt2x00dev, queue) {
+ status = rt2x00usb_alloc_urb(rt2x00dev, queue);
if (status)
goto exit;
}
/*
- * For the RX ring, skb's should be allocated.
+ * For the RX queue, skbs should be allocated.
*/
entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size;
- for (i = 0; i < rt2x00dev->rx->stats.limit; i++) {
- skb = dev_alloc_skb(NET_IP_ALIGN + entry_size);
+ for (i = 0; i < rt2x00dev->rx->limit; i++) {
+ skb = rt2x00usb_alloc_rxskb(rt2x00dev->rx);
if (!skb)
goto exit;
- skb_reserve(skb, NET_IP_ALIGN);
- skb_put(skb, entry_size);
-
- rt2x00dev->rx->entry[i].skb = skb;
+ rt2x00dev->rx->entries[i].skb = skb;
}
return 0;
void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
- struct data_ring *ring;
+ struct data_queue *queue;
- ring_for_each(rt2x00dev, ring)
- rt2x00usb_free_urb(rt2x00dev, ring);
+ queue_for_each(rt2x00dev, queue)
+ rt2x00usb_free_urb(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);
#endif /* CONFIG_PM */
/*
- * rt2x00pci module information.
+ * rt2x00usb module information.
*/
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION("rt2x00 library");
+MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");
* TX data handlers.
*/
int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
- struct data_ring *ring, struct sk_buff *skb,
+ struct data_queue *queue, struct sk_buff *skb,
struct ieee80211_tx_control *control);
+/**
+ * struct queue_entry_priv_usb_rx: Per RX entry USB specific information
+ *
+ * @urb: Urb structure used for device communication.
+ */
+struct queue_entry_priv_usb_rx {
+ struct urb *urb;
+};
+
+/**
+ * struct queue_entry_priv_usb_tx: Per TX entry USB specific information
+ *
+ * @urb: Urb structure used for device communication.
+ * @control: mac80211 control structure used to transmit data.
+ */
+struct queue_entry_priv_usb_tx {
+ struct urb *urb;
+
+ struct ieee80211_tx_control control;
+};
+
+/**
+ * struct queue_entry_priv_usb_bcn: Per TX entry USB specific information
+ *
+ * The first section should match &struct queue_entry_priv_usb_tx exactly.
+ * rt2500usb can use this structure to send a guardian byte when working
+ * with beacons.
+ *
+ * @urb: Urb structure used for device communication.
+ * @control: mac80211 control structure used to transmit data.
+ * @guardian_data: Set to 0, used for sending the guardian data.
+ * @guardian_urb: Urb structure used to send the guardian data.
+ */
+struct queue_entry_priv_usb_bcn {
+ struct urb *urb;
+
+ struct ieee80211_tx_control control;
+
+ unsigned int guardian_data;
+ struct urb *guardian_urb;
+};
+
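The guardian fields above exist for rt2500usb's beacon path, which has to send a single guardian byte to the device next to the beacon frame itself. A hedged sketch of how a driver could drive both URBs; the function names, the endpoint number and the completion stub are placeholders, not part of this patch:

/*
 * Illustrative sketch (not part of this patch).
 */
static void example_beacon_done(struct urb *urb)
{
	/* TX status handling for the beacon/guardian URBs would go here. */
}

static void example_submit_beacon(struct rt2x00_dev *rt2x00dev,
				  struct queue_entry *entry)
{
	struct queue_entry_priv_usb_bcn *priv_bcn = entry->priv_data;
	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);

	/* Queue the beacon frame itself. */
	usb_fill_bulk_urb(priv_bcn->urb, usb_dev,
			  usb_sndbulkpipe(usb_dev, 1),
			  entry->skb->data, entry->skb->len,
			  example_beacon_done, entry);
	usb_submit_urb(priv_bcn->urb, GFP_ATOMIC);

	/* Kick beacon generation with the single guardian byte. */
	priv_bcn->guardian_data = 0;
	usb_fill_bulk_urb(priv_bcn->guardian_urb, usb_dev,
			  usb_sndbulkpipe(usb_dev, 1),
			  &priv_bcn->guardian_data, 1,
			  example_beacon_done, entry);
	usb_submit_urb(priv_bcn->guardian_urb, GFP_ATOMIC);
}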
/*
* Device initialization handlers.
*/
void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry);
+ struct queue_entry *entry);
void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry);
+ struct queue_entry *entry);
int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev);
void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev);
u32 reg;
rt2x00pci_register_read(rt2x00dev, MAC_CSR13, ®);
- return rt2x00_get_field32(reg, MAC_CSR13_BIT5);;
+ return rt2x00_get_field32(reg, MAC_CSR13_BIT5);
}
#else
#define rt61pci_rfkill_poll NULL
}
static void rt61pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry)
+ struct queue_entry *entry)
{
- __le32 *rxd = entry->priv;
+ struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
u32 word;
- rt2x00_desc_read(rxd, 5, &word);
- rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS,
- entry->data_dma);
- rt2x00_desc_write(rxd, 5, word);
+ rt2x00_desc_read(priv_rx->desc, 5, &word);
+ rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS, priv_rx->dma);
+ rt2x00_desc_write(priv_rx->desc, 5, word);
- rt2x00_desc_read(rxd, 0, &word);
+ rt2x00_desc_read(priv_rx->desc, 0, &word);
rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
- rt2x00_desc_write(rxd, 0, word);
+ rt2x00_desc_write(priv_rx->desc, 0, word);
}
static void rt61pci_init_txentry(struct rt2x00_dev *rt2x00dev,
- struct data_entry *entry)
+ struct queue_entry *entry)
{
- __le32 *txd = entry->priv;
+ struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
u32 word;
- rt2x00_desc_read(txd, 1, &word);
+ rt2x00_desc_read(priv_tx->desc, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
- rt2x00_desc_write(txd, 1, word);
+ rt2x00_desc_write(priv_tx->desc, 1, word);
- rt2x00_desc_read(txd, 5, &word);
- rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->ring->queue_idx);
+ rt2x00_desc_read(priv_tx->desc, 5, &word);
+ rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx);
- rt2x00_desc_write(txd, 5, word);
+ rt2x00_desc_write(priv_tx->desc, 5, word);
- rt2x00_desc_read(txd, 6, &word);
- rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
- entry->data_dma);
- rt2x00_desc_write(txd, 6, word);
+ rt2x00_desc_read(priv_tx->desc, 6, &word);
+ rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, priv_tx->dma);
+ rt2x00_desc_write(priv_tx->desc, 6, word);
- rt2x00_desc_read(txd, 0, &word);
+ rt2x00_desc_read(priv_tx->desc, 0, &word);
rt2x00_set_field32(&word, TXD_W0_VALID, 0);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
- rt2x00_desc_write(txd, 0, word);
+ rt2x00_desc_write(priv_tx->desc, 0, word);
}
-static int rt61pci_init_rings(struct rt2x00_dev *rt2x00dev)
+static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
{
+ struct queue_entry_priv_pci_rx *priv_rx;
+ struct queue_entry_priv_pci_tx *priv_tx;
u32 reg;
/*
*/
rt2x00pci_register_read(rt2x00dev, TX_RING_CSR0, ®);
rt2x00_set_field32(®, TX_RING_CSR0_AC0_RING_SIZE,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit);
+ rt2x00dev->tx[0].limit);
rt2x00_set_field32(®, TX_RING_CSR0_AC1_RING_SIZE,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit);
+ rt2x00dev->tx[1].limit);
rt2x00_set_field32(®, TX_RING_CSR0_AC2_RING_SIZE,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA2].stats.limit);
+ rt2x00dev->tx[2].limit);
rt2x00_set_field32(®, TX_RING_CSR0_AC3_RING_SIZE,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA3].stats.limit);
+ rt2x00dev->tx[3].limit);
rt2x00pci_register_write(rt2x00dev, TX_RING_CSR0, reg);
rt2x00pci_register_read(rt2x00dev, TX_RING_CSR1, ®);
- rt2x00_set_field32(®, TX_RING_CSR1_MGMT_RING_SIZE,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA4].stats.limit);
rt2x00_set_field32(®, TX_RING_CSR1_TXD_SIZE,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size /
- 4);
+ rt2x00dev->tx[0].desc_size / 4);
rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg);
+ priv_tx = rt2x00dev->tx[0].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, ®);
- rt2x00_set_field32(®, AC0_BASE_CSR_RING_REGISTER,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma);
+ rt2x00_set_field32(®, AC0_BASE_CSR_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg);
+ priv_tx = rt2x00dev->tx[1].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, ®);
- rt2x00_set_field32(®, AC1_BASE_CSR_RING_REGISTER,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma);
+ rt2x00_set_field32(®, AC1_BASE_CSR_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg);
+ priv_tx = rt2x00dev->tx[2].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, ®);
- rt2x00_set_field32(®, AC2_BASE_CSR_RING_REGISTER,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA2].data_dma);
+ rt2x00_set_field32(®, AC2_BASE_CSR_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg);
+ priv_tx = rt2x00dev->tx[3].entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, ®);
- rt2x00_set_field32(®, AC3_BASE_CSR_RING_REGISTER,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA3].data_dma);
+ rt2x00_set_field32(®, AC3_BASE_CSR_RING_REGISTER, priv_tx->dma);
rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg);
- rt2x00pci_register_read(rt2x00dev, MGMT_BASE_CSR, ®);
- rt2x00_set_field32(®, MGMT_BASE_CSR_RING_REGISTER,
- rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA4].data_dma);
- rt2x00pci_register_write(rt2x00dev, MGMT_BASE_CSR, reg);
-
rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, ®);
- rt2x00_set_field32(®, RX_RING_CSR_RING_SIZE,
- rt2x00dev->rx->stats.limit);
+ rt2x00_set_field32(®, RX_RING_CSR_RING_SIZE, rt2x00dev->rx->limit);
rt2x00_set_field32(®, RX_RING_CSR_RXD_SIZE,
rt2x00dev->rx->desc_size / 4);
rt2x00_set_field32(®, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4);
rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg);
+ priv_rx = rt2x00dev->rx->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, ®);
- rt2x00_set_field32(®, RX_BASE_CSR_RING_REGISTER,
- rt2x00dev->rx->data_dma);
+ rt2x00_set_field32(®, RX_BASE_CSR_RING_REGISTER, priv_rx->dma);
rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg);
rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, ®);
rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1);
rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1);
rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1);
- rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_MGMT, 1);
+ rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_MGMT, 0);
rt2x00pci_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg);
rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, ®);
/*
* Initialize all registers.
*/
- if (rt61pci_init_rings(rt2x00dev) ||
+ if (rt61pci_init_queues(rt2x00dev) ||
rt61pci_init_registers(rt2x00dev) ||
rt61pci_init_bbp(rt2x00dev)) {
ERROR(rt2x00dev, "Register initialization failed.\n");
*/
static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
- struct txdata_entry_desc *desc,
+ struct txentry_desc *txdesc,
struct ieee80211_tx_control *control)
{
- struct skb_desc *skbdesc = get_skb_desc(skb);
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txd = skbdesc->desc;
u32 word;
* Start writing the descriptor words.
*/
rt2x00_desc_read(txd, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, desc->queue);
- rt2x00_set_field32(&word, TXD_W1_AIFSN, desc->aifs);
- rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min);
- rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max);
+ rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue);
+ rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
+ rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
+ rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1);
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 5, &word);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags));
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_ACK,
- test_bit(ENTRY_TXD_ACK, &desc->flags));
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags));
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
- test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags));
- rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs);
+ test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
!!(control->flags &
IEEE80211_TXCTL_LONG_RETRY_LIMIT));
rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
rt2x00_set_field32(&word, TXD_W0_BURST,
- test_bit(ENTRY_TXD_BURST, &desc->flags));
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
rt2x00_desc_write(txd, 0, word);
}
return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
}
-static void rt61pci_fill_rxdone(struct data_entry *entry,
- struct rxdata_entry_desc *desc)
+static void rt61pci_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
{
- __le32 *rxd = entry->priv;
+ struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
u32 word0;
u32 word1;
- rt2x00_desc_read(rxd, 0, &word0);
- rt2x00_desc_read(rxd, 1, &word1);
+ rt2x00_desc_read(priv_rx->desc, 0, &word0);
+ rt2x00_desc_read(priv_rx->desc, 1, &word1);
- desc->flags = 0;
+ rxdesc->flags = 0;
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
- desc->flags |= RX_FLAG_FAILED_FCS_CRC;
+ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
/*
* Obtain the status about this packet.
*/
- desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
- desc->rssi = rt61pci_agc_to_rssi(entry->ring->rt2x00dev, word1);
- desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM);
- desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
- desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
+ rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
+ rxdesc->rssi = rt61pci_agc_to_rssi(entry->queue->rt2x00dev, word1);
+ rxdesc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM);
+ rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
+ rxdesc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
}
/*
*/
static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
{
- struct data_ring *ring;
- struct data_entry *entry;
- struct data_entry *entry_done;
- __le32 *txd;
+ struct data_queue *queue;
+ struct queue_entry *entry;
+ struct queue_entry *entry_done;
+ struct queue_entry_priv_pci_tx *priv_tx;
+ struct txdone_entry_desc txdesc;
u32 word;
u32 reg;
u32 old_reg;
int type;
int index;
- int tx_status;
- int retry;
/*
* During each loop we will compare the freshly read
/*
* Skip this entry when it contains an invalid
- * ring identication number.
+ * queue identification number.
*/
type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE);
- ring = rt2x00lib_get_ring(rt2x00dev, type);
- if (unlikely(!ring))
+ queue = rt2x00queue_get_queue(rt2x00dev, type);
+ if (unlikely(!queue))
continue;
/*
* index number.
*/
index = rt2x00_get_field32(reg, STA_CSR4_PID_SUBTYPE);
- if (unlikely(index >= ring->stats.limit))
+ if (unlikely(index >= queue->limit))
continue;
- entry = &ring->entry[index];
- txd = entry->priv;
- rt2x00_desc_read(txd, 0, &word);
+ entry = &queue->entries[index];
+ priv_tx = entry->priv_data;
+ rt2x00_desc_read(priv_tx->desc, 0, &word);
if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
!rt2x00_get_field32(word, TXD_W0_VALID))
return;
- entry_done = rt2x00_get_data_entry_done(ring);
+ entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
while (entry != entry_done) {
- /* Catch up. Just report any entries we missed as
- * failed. */
+ /* Catch up.
+ * Just report any entries we missed as failed.
+ */
WARNING(rt2x00dev,
- "TX status report missed for entry %p\n",
- entry_done);
- rt2x00pci_txdone(rt2x00dev, entry_done, TX_FAIL_OTHER,
- 0);
- entry_done = rt2x00_get_data_entry_done(ring);
+ "TX status report missed for entry %d\n",
+ entry_done->entry_idx);
+
+ txdesc.status = TX_FAIL_OTHER;
+ txdesc.retry = 0;
+
+ rt2x00pci_txdone(rt2x00dev, entry_done, &txdesc);
+ entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
}
/*
* Obtain the status about this packet.
*/
- tx_status = rt2x00_get_field32(reg, STA_CSR4_TX_RESULT);
- retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT);
+ txdesc.status = rt2x00_get_field32(reg, STA_CSR4_TX_RESULT);
+ txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT);
- rt2x00pci_txdone(rt2x00dev, entry, tx_status, retry);
+ rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
}
}
struct ieee80211_tx_control *control)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct skb_desc *desc;
- struct data_ring *ring;
- struct data_entry *entry;
+ struct skb_frame_desc *skbdesc;
+ struct data_queue *queue;
+ struct queue_entry *entry;
/*
* Just in case the ieee80211 doesn't set this,
* initialization.
*/
control->queue = IEEE80211_TX_QUEUE_BEACON;
- ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
- entry = rt2x00_get_data_entry(ring);
+ queue = rt2x00queue_get_queue(rt2x00dev, control->queue);
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
/*
* We need to append the descriptor in front of the
* beacon frame.
*/
- if (skb_headroom(skb) < TXD_DESC_SIZE) {
- if (pskb_expand_head(skb, TXD_DESC_SIZE, 0, GFP_ATOMIC)) {
+ if (skb_headroom(skb) < queue->desc_size) {
+ if (pskb_expand_head(skb, queue->desc_size, 0, GFP_ATOMIC)) {
dev_kfree_skb(skb);
return -ENOMEM;
}
/*
* Add the descriptor in front of the skb.
*/
- skb_push(skb, ring->desc_size);
- memset(skb->data, 0, ring->desc_size);
+ skb_push(skb, queue->desc_size);
+ memset(skb->data, 0, queue->desc_size);
/*
* Fill in skb descriptor
*/
- desc = get_skb_desc(skb);
- desc->desc_len = ring->desc_size;
- desc->data_len = skb->len - ring->desc_size;
- desc->desc = skb->data;
- desc->data = skb->data + ring->desc_size;
- desc->ring = ring;
- desc->entry = entry;
+ skbdesc = get_skb_frame_desc(skb);
+ memset(skbdesc, 0, sizeof(*skbdesc));
+ skbdesc->data = skb->data + queue->desc_size;
+ skbdesc->data_len = queue->data_size;
+ skbdesc->desc = skb->data;
+ skbdesc->desc_len = queue->desc_size;
+ skbdesc->entry = entry;
rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
.config = rt61pci_config,
};
+static const struct data_queue_desc rt61pci_queue_rx = {
+ .entry_num = RX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = RXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_rx),
+};
+
+static const struct data_queue_desc rt61pci_queue_tx = {
+ .entry_num = TX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_tx),
+};
+
+static const struct data_queue_desc rt61pci_queue_bcn = {
+ .entry_num = BEACON_ENTRIES,
+ .data_size = MGMT_FRAME_SIZE,
+ .desc_size = TXINFO_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci_tx),
+};
+
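With the per-queue descriptors above, rt2x00lib rather than each driver is responsible for sizing the queues and reserving priv_size bytes of driver data per entry. A rough sketch of what that allocation could look like; the helper name and the exact memory layout are assumptions for illustration:

/*
 * Illustrative sketch (not part of this patch): turning a static
 * data_queue_desc into the queue->entries array.
 */
static int example_alloc_entries(struct data_queue *queue,
				 const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int i;
	void *priv;

	queue->limit = qdesc->entry_num;
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/* Entries and per-entry driver data in a single allocation. */
	entries = kcalloc(qdesc->entry_num,
			  sizeof(*entries) + qdesc->priv_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	priv = &entries[qdesc->entry_num];
	for (i = 0; i < qdesc->entry_num; i++) {
		entries[i].entry_idx = i;
		entries[i].queue = queue;
		entries[i].priv_data = priv + i * qdesc->priv_size;
	}

	queue->entries = entries;
	return 0;
}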
static const struct rt2x00_ops rt61pci_ops = {
.name = KBUILD_MODNAME,
- .rxd_size = RXD_DESC_SIZE,
- .txd_size = TXD_DESC_SIZE,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
+ .rx = &rt61pci_queue_rx,
+ .tx = &rt61pci_queue_tx,
+ .bcn = &rt61pci_queue_bcn,
.lib = &rt61pci_rt2x00_ops,
.hw = &rt61pci_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
* DMA descriptor defines.
*/
#define TXD_DESC_SIZE ( 16 * sizeof(__le32) )
+#define TXINFO_SIZE ( 6 * sizeof(__le32) )
#define RXD_DESC_SIZE ( 16 * sizeof(__le32) )
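For reference: with sizeof(__le32) being 4, TXD_DESC_SIZE and RXD_DESC_SIZE come to 64 bytes each for rt61pci, while the new TXINFO_SIZE used as the beacon queue's desc_size is 24 bytes.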
/*
*/
static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
- struct txdata_entry_desc *desc,
+ struct txentry_desc *txdesc,
struct ieee80211_tx_control *control)
{
- struct skb_desc *skbdesc = get_skb_desc(skb);
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txd = skbdesc->desc;
u32 word;
* Start writing the descriptor words.
*/
rt2x00_desc_read(txd, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, desc->queue);
- rt2x00_set_field32(&word, TXD_W1_AIFSN, desc->aifs);
- rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min);
- rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max);
+ rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue);
+ rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
+ rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
+ rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1);
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 5, &word);
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_BURST,
- test_bit(ENTRY_TXD_BURST, &desc->flags));
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags));
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_ACK,
- test_bit(ENTRY_TXD_ACK, &desc->flags));
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags));
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
- test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags));
- rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs);
+ test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
!!(control->flags &
IEEE80211_TXCTL_LONG_RETRY_LIMIT));
rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
rt2x00_set_field32(&word, TXD_W0_BURST2,
- test_bit(ENTRY_TXD_BURST, &desc->flags));
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
rt2x00_desc_write(txd, 0, word);
}
return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
}
-static void rt73usb_fill_rxdone(struct data_entry *entry,
- struct rxdata_entry_desc *desc)
+static void rt73usb_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
{
- struct skb_desc *skbdesc = get_skb_desc(entry->skb);
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
__le32 *rxd = (__le32 *)entry->skb->data;
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *)(entry->skb->data + entry->queue->desc_size);
+ int header_size = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
u32 word0;
u32 word1;
rt2x00_desc_read(rxd, 0, &word0);
rt2x00_desc_read(rxd, 1, &word1);
- desc->flags = 0;
+ rxdesc->flags = 0;
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
- desc->flags |= RX_FLAG_FAILED_FCS_CRC;
+ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
/*
* Obtain the status about this packet.
*/
- desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
- desc->rssi = rt73usb_agc_to_rssi(entry->ring->rt2x00dev, word1);
- desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM);
- desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
- desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
+ rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
+ rxdesc->rssi = rt73usb_agc_to_rssi(entry->queue->rt2x00dev, word1);
+ rxdesc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM);
+ rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
+ rxdesc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
+
+ /*
+ * The data behind the ieee80211 header must be
+ * aligned on a 4 byte boundary.
+ */
+ if (header_size % 4 == 0) {
+ skb_push(entry->skb, 2);
+ memmove(entry->skb->data, entry->skb->data + 2,
+ entry->skb->len - 2);
+ }
/*
* Set descriptor and data pointer.
*/
+ skbdesc->data = entry->skb->data + entry->queue->desc_size;
+ skbdesc->data_len = entry->queue->data_size;
skbdesc->desc = entry->skb->data;
- skbdesc->desc_len = entry->ring->desc_size;
- skbdesc->data = entry->skb->data + entry->ring->desc_size;
- skbdesc->data_len = desc->size;
+ skbdesc->desc_len = entry->queue->desc_size;
+
+ /*
+ * Remove descriptor from skb buffer and trim the whole thing
+ * down to only contain data.
+ */
+ skb_pull(entry->skb, skbdesc->desc_len);
+ skb_trim(entry->skb, rxdesc->size);
}
/*
struct ieee80211_tx_control *control)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct skb_desc *desc;
- struct data_ring *ring;
- struct data_entry *entry;
+ struct skb_frame_desc *skbdesc;
+ struct data_queue *queue;
+ struct queue_entry *entry;
int timeout;
/*
* initialization.
*/
control->queue = IEEE80211_TX_QUEUE_BEACON;
- ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
- entry = rt2x00_get_data_entry(ring);
+ queue = rt2x00queue_get_queue(rt2x00dev, control->queue);
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
/*
* Add the descriptor in front of the skb.
*/
- skb_push(skb, ring->desc_size);
- memset(skb->data, 0, ring->desc_size);
+ skb_push(skb, queue->desc_size);
+ memset(skb->data, 0, queue->desc_size);
/*
* Fill in skb descriptor
*/
- desc = get_skb_desc(skb);
- desc->desc_len = ring->desc_size;
- desc->data_len = skb->len - ring->desc_size;
- desc->desc = skb->data;
- desc->data = skb->data + ring->desc_size;
- desc->ring = ring;
- desc->entry = entry;
+ skbdesc = get_skb_frame_desc(skb);
+ memset(skbdesc, 0, sizeof(*skbdesc));
+ skbdesc->data = skb->data + queue->desc_size;
+ skbdesc->data_len = queue->data_size;
+ skbdesc->desc = skb->data;
+ skbdesc->desc_len = queue->desc_size;
+ skbdesc->entry = entry;
rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
.config = rt73usb_config,
};
+static const struct data_queue_desc rt73usb_queue_rx = {
+ .entry_num = RX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = RXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb_rx),
+};
+
+static const struct data_queue_desc rt73usb_queue_tx = {
+ .entry_num = TX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb_tx),
+};
+
+static const struct data_queue_desc rt73usb_queue_bcn = {
+ .entry_num = BEACON_ENTRIES,
+ .data_size = MGMT_FRAME_SIZE,
+ .desc_size = TXINFO_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb_tx),
+};
+
static const struct rt2x00_ops rt73usb_ops = {
.name = KBUILD_MODNAME,
- .rxd_size = RXD_DESC_SIZE,
- .txd_size = TXD_DESC_SIZE,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
+ .rx = &rt73usb_queue_rx,
+ .tx = &rt73usb_queue_tx,
+ .bcn = &rt73usb_queue_bcn,
.lib = &rt73usb_rt2x00_ops,
.hw = &rt73usb_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
* DMA descriptor defines.
*/
#define TXD_DESC_SIZE ( 6 * sizeof(__le32) )
+#define TXINFO_SIZE ( 6 * sizeof(__le32) )
#define RXD_DESC_SIZE ( 6 * sizeof(__le32) )
/*