/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00usb
	Abstract: rt2x00 generic usb device routines.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"
/*
 * Interfacing with the HW.
 */
37 int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
38 const u8 request, const u8 requesttype,
39 const u16 offset, const u16 value,
40 void *buffer, const u16 buffer_length,
43 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
47 (requesttype == USB_VENDOR_REQUEST_IN) ?
48 usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
51 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
52 status = usb_control_msg(usb_dev, pipe, request, requesttype,
53 value, offset, buffer, buffer_length,
60 * -ENODEV: Device has disappeared, no point continuing.
61 * All other errors: Try again.
63 else if (status == -ENODEV)
68 "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n",
69 request, offset, status);
73 EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);
75 int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
76 const u8 request, const u8 requesttype,
77 const u16 offset, void *buffer,
78 const u16 buffer_length, const int timeout)
82 BUG_ON(!mutex_is_locked(&rt2x00dev->usb_cache_mutex));
85 * Check for Cache availability.
87 if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
88 ERROR(rt2x00dev, "CSR cache not available.\n");
92 if (requesttype == USB_VENDOR_REQUEST_OUT)
93 memcpy(rt2x00dev->csr.cache, buffer, buffer_length);
95 status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
96 offset, 0, rt2x00dev->csr.cache,
97 buffer_length, timeout);
99 if (!status && requesttype == USB_VENDOR_REQUEST_IN)
100 memcpy(buffer, rt2x00dev->csr.cache, buffer_length);
104 EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);
106 int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
107 const u8 request, const u8 requesttype,
108 const u16 offset, void *buffer,
109 const u16 buffer_length, const int timeout)
113 mutex_lock(&rt2x00dev->usb_cache_mutex);
115 status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
116 requesttype, offset, buffer,
117 buffer_length, timeout);
119 mutex_unlock(&rt2x00dev->usb_cache_mutex);
123 EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
128 static void rt2x00usb_interrupt_txdone(struct urb *urb)
130 struct queue_entry *entry = (struct queue_entry *)urb->context;
131 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
132 struct txdone_entry_desc txdesc;
133 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
135 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
136 !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
140 * Remove the descriptor data from the buffer.
142 skb_pull(entry->skb, entry->queue->desc_size);
145 * Obtain the status about this packet.
146 * Note that when the status is 0 it does not mean the
147 * frame was send out correctly. It only means the frame
148 * was succesfully pushed to the hardware, we have no
149 * way to determine the transmission status right now.
150 * (Only indirectly by looking at the failed TX counters
154 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
156 __set_bit(TXDONE_FAILURE, &txdesc.flags);
159 rt2x00lib_txdone(entry, &txdesc);
162 * Make this entry available for reuse.
165 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
168 * If the data queue was full before the txdone handler
169 * we must make sure the packet queue in the mac80211 stack
170 * is reenabled when the txdone handler has finished.
172 if (!rt2x00queue_full(entry->queue))
173 ieee80211_wake_queue(rt2x00dev->hw, qid);
176 int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
177 struct data_queue *queue, struct sk_buff *skb)
179 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
180 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
181 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
182 struct skb_frame_desc *skbdesc;
183 struct txentry_desc txdesc;
186 if (rt2x00queue_full(queue))
189 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
191 "Arrived at non-free entry in the non-full queue %d.\n"
192 "Please file bug report to %s.\n",
193 entry->queue->qid, DRV_PROJECT);
198 * Copy all TX descriptor information into txdesc,
199 * after that we are free to use the skb->cb array
200 * for our information.
203 rt2x00queue_create_tx_descriptor(entry, &txdesc);
206 * Add the descriptor in front of the skb.
208 skb_push(skb, queue->desc_size);
209 memset(skb->data, 0, queue->desc_size);
212 * Fill in skb descriptor
214 skbdesc = get_skb_frame_desc(skb);
215 memset(skbdesc, 0, sizeof(*skbdesc));
216 skbdesc->data = skb->data + queue->desc_size;
217 skbdesc->data_len = skb->len - queue->desc_size;
218 skbdesc->desc = skb->data;
219 skbdesc->desc_len = queue->desc_size;
220 skbdesc->entry = entry;
222 rt2x00queue_write_tx_descriptor(entry, &txdesc);
225 * USB devices cannot blindly pass the skb->len as the
226 * length of the data to usb_fill_bulk_urb. Pass the skb
227 * to the driver to determine what the length should be.
229 length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, skb);
232 * Initialize URB and send the frame to the device.
234 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
235 usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
236 skb->data, length, rt2x00usb_interrupt_txdone, entry);
237 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
239 rt2x00queue_index_inc(queue, Q_INDEX);
243 EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
248 static struct sk_buff* rt2x00usb_alloc_rxskb(struct data_queue *queue)
251 unsigned int frame_size;
252 unsigned int reserved_size;
255 * The frame size includes descriptor size, because the
256 * hardware directly receive the frame into the skbuffer.
258 frame_size = queue->data_size + queue->desc_size;
261 * For the allocation we should keep a few things in mind:
262 * 1) 4byte alignment of 802.11 payload
264 * For (1) we need at most 4 bytes to guarentee the correct
265 * alignment. We are going to optimize the fact that the chance
266 * that the 802.11 header_size % 4 == 2 is much bigger then
267 * anything else. However since we need to move the frame up
268 * to 3 bytes to the front, which means we need to preallocate
276 skb = dev_alloc_skb(frame_size + reserved_size);
280 skb_reserve(skb, reserved_size);
281 skb_put(skb, frame_size);
286 static void rt2x00usb_interrupt_rxdone(struct urb *urb)
288 struct queue_entry *entry = (struct queue_entry *)urb->context;
289 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
291 struct skb_frame_desc *skbdesc;
292 struct rxdone_entry_desc rxdesc;
293 unsigned int header_size;
296 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
297 !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
301 * Check if the received data is simply too small
302 * to be actually valid, or if the urb is signaling
305 if (urb->actual_length < entry->queue->desc_size || urb->status)
309 * Fill in skb descriptor
311 skbdesc = get_skb_frame_desc(entry->skb);
312 memset(skbdesc, 0, sizeof(*skbdesc));
313 skbdesc->entry = entry;
315 memset(&rxdesc, 0, sizeof(rxdesc));
316 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
318 header_size = ieee80211_get_hdrlen_from_skb(entry->skb);
321 * The data behind the ieee80211 header must be
322 * aligned on a 4 byte boundary. We already reserved
323 * 2 bytes for header_size % 4 == 2 optimization.
324 * To determine the number of bytes which the data
325 * should be moved to the left, we must add these
326 * 2 bytes to the header_size.
328 align = (header_size + 2) % 4;
331 skb_push(entry->skb, align);
332 /* Move entire frame in 1 command */
333 memmove(entry->skb->data, entry->skb->data + align,
337 /* Update data pointers, trim buffer to correct size */
338 skbdesc->data = entry->skb->data;
339 skb_trim(entry->skb, rxdesc.size);
342 * Allocate a new sk buffer to replace the current one.
343 * If allocation fails, we should drop the current frame
344 * so we can recycle the existing sk buffer for the new frame.
346 skb = rt2x00usb_alloc_rxskb(entry->queue);
351 * Send the frame to rt2x00lib for further processing.
353 rt2x00lib_rxdone(entry, &rxdesc);
356 * Replace current entry's skb with the newly allocated one,
357 * and reinitialize the urb.
360 urb->transfer_buffer = entry->skb->data;
361 urb->transfer_buffer_length = entry->skb->len;
364 if (test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) {
365 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
366 usb_submit_urb(urb, GFP_ATOMIC);
369 rt2x00queue_index_inc(entry->queue, Q_INDEX);
375 void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
377 struct queue_entry_priv_usb *entry_priv;
378 struct queue_entry_priv_usb_bcn *bcn_priv;
381 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
387 for (i = 0; i < rt2x00dev->rx->limit; i++) {
388 entry_priv = rt2x00dev->rx->entries[i].priv_data;
389 usb_kill_urb(entry_priv->urb);
395 for (i = 0; i < rt2x00dev->bcn->limit; i++) {
396 bcn_priv = rt2x00dev->bcn->entries[i].priv_data;
397 if (bcn_priv->guardian_urb)
398 usb_kill_urb(bcn_priv->guardian_urb);
401 EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
/*
 * Device initialization handlers.
 */
406 void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
407 struct queue_entry *entry)
409 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
410 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
412 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
413 usb_rcvbulkpipe(usb_dev, 1),
414 entry->skb->data, entry->skb->len,
415 rt2x00usb_interrupt_rxdone, entry);
417 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
418 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
420 EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
/*
 * TX entries require no per-entry initialization on USB devices;
 * this stub exists to satisfy the shared rt2x00lib interface.
 */
void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
			    struct queue_entry *entry)
{
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
429 static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
430 struct data_queue *queue)
432 struct queue_entry_priv_usb *entry_priv;
433 struct queue_entry_priv_usb_bcn *bcn_priv;
436 for (i = 0; i < queue->limit; i++) {
437 entry_priv = queue->entries[i].priv_data;
438 entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
439 if (!entry_priv->urb)
444 * If this is not the beacon queue or
445 * no guardian byte was required for the beacon,
448 if (rt2x00dev->bcn != queue ||
449 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
452 for (i = 0; i < queue->limit; i++) {
453 bcn_priv = queue->entries[i].priv_data;
454 bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
455 if (!bcn_priv->guardian_urb)
462 static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
463 struct data_queue *queue)
465 struct queue_entry_priv_usb *entry_priv;
466 struct queue_entry_priv_usb_bcn *bcn_priv;
472 for (i = 0; i < queue->limit; i++) {
473 entry_priv = queue->entries[i].priv_data;
474 usb_kill_urb(entry_priv->urb);
475 usb_free_urb(entry_priv->urb);
476 if (queue->entries[i].skb)
477 kfree_skb(queue->entries[i].skb);
481 * If this is not the beacon queue or
482 * no guardian byte was required for the beacon,
485 if (rt2x00dev->bcn != queue ||
486 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
489 for (i = 0; i < queue->limit; i++) {
490 bcn_priv = queue->entries[i].priv_data;
491 usb_kill_urb(bcn_priv->guardian_urb);
492 usb_free_urb(bcn_priv->guardian_urb);
496 int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
498 struct data_queue *queue;
500 unsigned int entry_size;
502 int uninitialized_var(status);
507 queue_for_each(rt2x00dev, queue) {
508 status = rt2x00usb_alloc_urb(rt2x00dev, queue);
514 * For the RX queue, skb's should be allocated.
516 entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size;
517 for (i = 0; i < rt2x00dev->rx->limit; i++) {
518 skb = rt2x00usb_alloc_rxskb(rt2x00dev->rx);
522 rt2x00dev->rx->entries[i].skb = skb;
528 rt2x00usb_uninitialize(rt2x00dev);
532 EXPORT_SYMBOL_GPL(rt2x00usb_initialize);
534 void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
536 struct data_queue *queue;
538 queue_for_each(rt2x00dev, queue)
539 rt2x00usb_free_urb(rt2x00dev, queue);
541 EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);
/*
 * USB driver handlers.
 */
546 static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
548 kfree(rt2x00dev->rf);
549 rt2x00dev->rf = NULL;
551 kfree(rt2x00dev->eeprom);
552 rt2x00dev->eeprom = NULL;
554 kfree(rt2x00dev->csr.cache);
555 rt2x00dev->csr.cache = NULL;
558 static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
560 rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
561 if (!rt2x00dev->csr.cache)
564 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
565 if (!rt2x00dev->eeprom)
568 rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
575 ERROR_PROBE("Failed to allocate registers.\n");
577 rt2x00usb_free_reg(rt2x00dev);
582 int rt2x00usb_probe(struct usb_interface *usb_intf,
583 const struct usb_device_id *id)
585 struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
586 struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info;
587 struct ieee80211_hw *hw;
588 struct rt2x00_dev *rt2x00dev;
591 usb_dev = usb_get_dev(usb_dev);
593 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
595 ERROR_PROBE("Failed to allocate hardware.\n");
597 goto exit_put_device;
600 usb_set_intfdata(usb_intf, hw);
602 rt2x00dev = hw->priv;
603 rt2x00dev->dev = usb_intf;
604 rt2x00dev->ops = ops;
606 mutex_init(&rt2x00dev->usb_cache_mutex);
608 rt2x00dev->usb_maxpacket =
609 usb_maxpacket(usb_dev, usb_sndbulkpipe(usb_dev, 1), 1);
610 if (!rt2x00dev->usb_maxpacket)
611 rt2x00dev->usb_maxpacket = 1;
613 retval = rt2x00usb_alloc_reg(rt2x00dev);
615 goto exit_free_device;
617 retval = rt2x00lib_probe_dev(rt2x00dev);
624 rt2x00usb_free_reg(rt2x00dev);
627 ieee80211_free_hw(hw);
630 usb_put_dev(usb_dev);
632 usb_set_intfdata(usb_intf, NULL);
636 EXPORT_SYMBOL_GPL(rt2x00usb_probe);
638 void rt2x00usb_disconnect(struct usb_interface *usb_intf)
640 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
641 struct rt2x00_dev *rt2x00dev = hw->priv;
644 * Free all allocated data.
646 rt2x00lib_remove_dev(rt2x00dev);
647 rt2x00usb_free_reg(rt2x00dev);
648 ieee80211_free_hw(hw);
651 * Free the USB device data.
653 usb_set_intfdata(usb_intf, NULL);
654 usb_put_dev(interface_to_usbdev(usb_intf));
656 EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);
659 int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
661 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
662 struct rt2x00_dev *rt2x00dev = hw->priv;
665 retval = rt2x00lib_suspend(rt2x00dev, state);
669 rt2x00usb_free_reg(rt2x00dev);
672 * Decrease usbdev refcount.
674 usb_put_dev(interface_to_usbdev(usb_intf));
678 EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
680 int rt2x00usb_resume(struct usb_interface *usb_intf)
682 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
683 struct rt2x00_dev *rt2x00dev = hw->priv;
686 usb_get_dev(interface_to_usbdev(usb_intf));
688 retval = rt2x00usb_alloc_reg(rt2x00dev);
692 retval = rt2x00lib_resume(rt2x00dev);
699 rt2x00usb_free_reg(rt2x00dev);
703 EXPORT_SYMBOL_GPL(rt2x00usb_resume);
704 #endif /* CONFIG_PM */
707 * rt2x00usb module information.
709 MODULE_AUTHOR(DRV_PROJECT);
710 MODULE_VERSION(DRV_VERSION);
711 MODULE_DESCRIPTION("rt2x00 usb library");
712 MODULE_LICENSE("GPL");