/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
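/*
 * Allocate an RX skb sized to hold both the frame data and the device
 * descriptor, since the hardware receives both directly into the buffer.
 * The buffer is additionally mapped for DMA when the driver has set
 * DRIVER_REQUIRE_DMA.
 */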
struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	unsigned int frame_size;
	unsigned int reserved_size;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;

	/*
	 * The frame size includes descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * Reserve a few bytes extra headroom to allow drivers some moving
	 * space (e.g. for alignment), while keeping the skb aligned.
	 */
	reserved_size = 8;

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + reserved_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserved_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
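/*
 * Map the skb payload for CPU-to-device DMA prior to transmission, and
 * flag the mapping so rt2x00queue_unmap_skb() knows in which direction
 * to unmap it later.
 */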
void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					  DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
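/*
 * Undo any RX or TX streaming DMA mapping on this skb; the mapping
 * direction is recovered from the SKBDESC_DMA_MAPPED_* flags.
 */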
void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}
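/*
 * Translate the mac80211 TX control information in skb->cb into a
 * driver-agnostic txentry_desc: queue parameters, ACK/RTS/fragment
 * flags, retry settings and the PLCP signal/service/length fields.
 */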
void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
				      struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue.
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/* Data length should be extended with 4 bytes for CRC */
	data_length = entry->skb->len + 4;

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.retry_limit;
	if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		txdesc->ifs = IFS_SIFS;
	} else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else {
		txdesc->ifs = IFS_SIFS;
	}

	/*
	 * PLCP setup:
	 * Length calculation depends on OFDM/CCK rate.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		__set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);

		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = get_duration_res(data_length, hwrate->bitrate);
		duration = get_duration(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (rt2x00_get_rate_preamble(rate->hw_value))
			txdesc->signal |= 0x08;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_create_tx_descriptor);
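/*
 * Hand the completed descriptor to the driver, dump the frame to
 * debugfs, and decide whether the TX queue must be kicked; the rules
 * for kicking are spelled out inside the function.
 */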
void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 *	1) Don't kick the beacon queue.
	 *	2) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	3) Rule 2 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (entry->queue->qid == QID_BEACON)
		return;

	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_write_tx_descriptor);
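/*
 * Claim the entry at the current Q_INDEX, build and write the TX
 * descriptor, and advance the index. Returns a negative errno when
 * the queue is full or the driver rejects the frame.
 */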
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;

	if (unlikely(rt2x00queue_full(queue)))
		return -EINVAL;

	if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * The skb->cb array is now ours and we are free to use it.
	 */
	skbdesc = get_skb_frame_desc(entry->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
		__clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		return -EIO;
	}

	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_map_txskb(queue->rt2x00dev, skb);

	__set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);

	return 0;
}
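/*
 * Map a qid to its data_queue: TX qids index the tx array directly,
 * while the beacon and (optional) atim queues live behind rt2x00dev->bcn.
 */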
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
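/*
 * Look up the entry currently at the given index type (e.g. Q_INDEX,
 * Q_INDEX_DONE) under the queue lock.
 */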
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
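/*
 * Advance one of the queue indices, wrapping at queue->limit, and
 * update the queue length/count bookkeeping accordingly.
 */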
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}
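/*
 * Reset all indices and counters of a queue to their empty state.
 */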
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}
void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	unsigned int i;

	rt2x00queue_reset(queue);

	if (!rt2x00dev->ops->lib->init_rxentry)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
						  &queue->entries[i]);
}
void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	txall_queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		if (!rt2x00dev->ops->lib->init_txentry)
			continue;

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->init_txentry(rt2x00dev,
							  &queue->entries[i]);
	}
}
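/*
 * Allocate the entry array for a queue. The driver private data for
 * all entries is carved out of the same allocation, directly behind
 * the queue_entry array; QUEUE_ENTRY_PRIV_OFFSET computes the address
 * of each entry's private area within that block.
 */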
static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}
static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}
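/*
 * Allocate the entries for every queue (RX, TX, beacon and, when the
 * driver requires it, atim) and preallocate the RX skbs. On any
 * failure everything allocated so far is torn down again.
 */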
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}
void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}
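/*
 * Allocate the data_queue array itself and assign every queue its
 * qid and default EDCA parameters via rt2x00queue_init().
 */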
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers.
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}