/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
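/*
 * Example usage (a minimal sketch, not part of this file): a module that
 * wants HCI device events can register a notifier block against the chain
 * above. The example_* names below are illustrative only.
 */
#if 0
static int example_hci_notify(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct hci_dev *hdev = ptr;

	if (event == HCI_DEV_REG)
		printk(KERN_INFO "hci device %s registered\n", hdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_hci_notify,
};

/* module init: hci_register_notifier(&example_nb);   */
/* module exit: hci_unregister_notifier(&example_nb); */
#endif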
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
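/*
 * The request contract, sketched: the req() callback only queues HCI
 * commands; the event handler calls hci_req_complete() when the matching
 * Command Complete/Status event arrives, which flips req_status and wakes
 * the waiter above. A typical caller (opt value illustrative) is:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *						HCI_INIT_TIMEOUT);
 */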
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = __cpu_to_le16(0xffff);
		cp.sco_max_pkt = __cpu_to_le16(0xffff);
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	{
		struct hci_cp_set_event_flt cp;
		cp.flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
	}

	/* Page timeout ~20 secs */
	param = __cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = __cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
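/*
 * Callers must balance the reference taken here with hci_dev_put(), e.g.
 * (sketch):
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */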
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		memset(e, 0, sizeof(struct inquiry_entry));
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * 2 * HZ;
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
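/*
 * User space reaches this path through the HCIINQUIRY ioctl on a raw HCI
 * socket. A rough sketch (error handling omitted; the lap below is the
 * General Inquiry Access Code 0x9e8b33, little endian):
 */
#if 0
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
			.lap = { 0x33, 0x8b, 0x9e } } };
	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	ioctl(dd, HCIINQUIRY, (unsigned long) &buf);
#endif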
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);

	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HZ/4);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);
	hci_dev_put(hdev);

	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
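/*
 * Matching user-space sketch for one of the cases above (HCISETSCAN):
 * dev_opt carries the Write Scan Enable parameter; dd is an HCI socket as in
 * the inquiry example earlier.
 */
#if 0
	struct hci_dev_req dr = { .dev_id = 0, .dev_opt = SCAN_PAGE | SCAN_INQUIRY };

	ioctl(dd, HCISETSCAN, (unsigned long) &dr);
#endif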
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	memset(hdev, 0, sizeof(struct hci_dev));

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via class release */
	class_device_put(&hdev->class_dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
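/*
 * A minimal transport driver skeleton (sketch only; all example_* callbacks
 * are made-up names): allocate the device, fill in the mandatory callbacks
 * checked above, then register.
 */
#if 0
static int example_probe(void)
{
	struct hci_dev *hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->type     = HCI_USB;	/* transport type */
	hdev->open     = example_open;
	hdev->close    = example_close;
	hdev->flush    = example_flush;	/* optional */
	hdev->send     = example_send;
	hdev->destruct = example_destruct;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return -ENODEV;
	}

	return 0;
}
#endif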
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	__hci_dev_put(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
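/*
 * Sketch of how an upper protocol hooks in (cf. net/bluetooth/l2cap.c; only
 * the fields this file actually uses are shown):
 */
#if 0
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.recv_acldata	= l2cap_recv_acldata,
	/* connect/disconnect callbacks omitted */
};

/* module init: hci_register_proto(&l2cap_hci_proto); */
#endif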
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = __cpu_to_le16(len);

	skb->h.raw = (void *) hdr;
}
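/*
 * Worked example: hci_handle_pack() puts the 12-bit connection handle in the
 * low bits and the 4-bit PB/BC flags in the high bits, i.e.
 * handle | (flags << 12). Handle 0x002a with ACL_START (0x02) therefore
 * becomes 0x202a, sent little endian on the wire.
 */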
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = __cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
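/*
 * Worked example of the quota above: with hdev->acl_cnt == 8 and two ACL
 * connections that have queued data, the least loaded connection gets a
 * quote of 8 / 2 == 4 packets per pass, so hci_sched_acl() shares the
 * controller buffers fairly instead of draining one queue first.
 */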
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}
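/*
 * Note on command flow control (sketch of the credit cycle): cmd_cnt tracks
 * how many commands the controller will currently accept. It is decremented
 * here for each command sent and replenished by the event handler when a
 * Command Complete/Command Status event reports new credits; the one-second
 * timeout above merely recovers from a controller that stops responding.
 */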