/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
28 #include <net/bluetooth/hci.h>
30 /* HCI upper protocols */
31 #define HCI_PROTO_L2CAP 0
32 #define HCI_PROTO_SCO 1
34 /* HCI Core structures */
38 __u8 pscan_period_mode;
45 struct inquiry_entry {
46 struct inquiry_entry *next;
48 struct inquiry_data data;
51 struct inquiry_cache {
54 struct inquiry_entry *list;
57 struct hci_conn_hash {
58 struct list_head list;
65 struct list_head list;
85 __u16 sniff_min_interval;
86 __u16 sniff_max_interval;
96 unsigned int acl_pkts;
97 unsigned int sco_pkts;
99 unsigned long cmd_last_tx;
100 unsigned long acl_last_tx;
101 unsigned long sco_last_tx;
103 struct tasklet_struct cmd_task;
104 struct tasklet_struct rx_task;
105 struct tasklet_struct tx_task;
107 struct sk_buff_head rx_q;
108 struct sk_buff_head raw_q;
109 struct sk_buff_head cmd_q;
111 struct sk_buff *sent_cmd;
113 struct semaphore req_lock;
114 wait_queue_head_t req_wait_q;
118 struct inquiry_cache inq_cache;
119 struct hci_conn_hash conn_hash;
121 struct hci_dev_stats stat;
123 struct sk_buff_head driver_init;
130 struct device *parent;
133 struct module *owner;
135 int (*open)(struct hci_dev *hdev);
136 int (*close)(struct hci_dev *hdev);
137 int (*flush)(struct hci_dev *hdev);
138 int (*send)(struct sk_buff *skb);
139 void (*destruct)(struct hci_dev *hdev);
140 void (*notify)(struct hci_dev *hdev, unsigned int evt);
141 int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
145 struct list_head list;
166 struct sk_buff_head data_q;
168 struct timer_list disc_timer;
169 struct timer_list idle_timer;
171 struct work_struct work;
175 struct hci_dev *hdev;
180 struct hci_conn *link;
183 extern struct hci_proto *hci_proto[];
184 extern struct list_head hci_dev_list;
185 extern struct list_head hci_cb_list;
186 extern rwlock_t hci_dev_list_lock;
187 extern rwlock_t hci_cb_list_lock;
189 /* ----- Inquiry cache ----- */
190 #define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds
191 #define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds
193 #define inquiry_cache_lock(c) spin_lock(&c->lock)
194 #define inquiry_cache_unlock(c) spin_unlock(&c->lock)
195 #define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
196 #define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
198 static inline void inquiry_cache_init(struct hci_dev *hdev)
200 struct inquiry_cache *c = &hdev->inq_cache;
201 spin_lock_init(&c->lock);
205 static inline int inquiry_cache_empty(struct hci_dev *hdev)
207 struct inquiry_cache *c = &hdev->inq_cache;
208 return (c->list == NULL);
211 static inline long inquiry_cache_age(struct hci_dev *hdev)
213 struct inquiry_cache *c = &hdev->inq_cache;
214 return jiffies - c->timestamp;
217 static inline long inquiry_entry_age(struct inquiry_entry *e)
219 return jiffies - e->timestamp;
222 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
223 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
225 /* ----- HCI Connections ----- */
228 HCI_CONN_ENCRYPT_PEND,
229 HCI_CONN_RSWITCH_PEND,
230 HCI_CONN_MODE_CHANGE_PEND,
233 static inline void hci_conn_hash_init(struct hci_dev *hdev)
235 struct hci_conn_hash *h = &hdev->conn_hash;
236 INIT_LIST_HEAD(&h->list);
237 spin_lock_init(&h->lock);
242 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
244 struct hci_conn_hash *h = &hdev->conn_hash;
245 list_add(&c->list, &h->list);
246 if (c->type == ACL_LINK)
252 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
254 struct hci_conn_hash *h = &hdev->conn_hash;
256 if (c->type == ACL_LINK)
262 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
265 struct hci_conn_hash *h = &hdev->conn_hash;
269 list_for_each(p, &h->list) {
270 c = list_entry(p, struct hci_conn, list);
271 if (c->handle == handle)
277 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
278 __u8 type, bdaddr_t *ba)
280 struct hci_conn_hash *h = &hdev->conn_hash;
284 list_for_each(p, &h->list) {
285 c = list_entry(p, struct hci_conn, list);
286 if (c->type == type && !bacmp(&c->dst, ba))
292 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
293 void hci_add_sco(struct hci_conn *conn, __u16 handle);
295 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
296 int hci_conn_del(struct hci_conn *conn);
297 void hci_conn_hash_flush(struct hci_dev *hdev);
299 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
300 int hci_conn_auth(struct hci_conn *conn);
301 int hci_conn_encrypt(struct hci_conn *conn);
302 int hci_conn_change_link_key(struct hci_conn *conn);
303 int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
305 void hci_conn_enter_active_mode(struct hci_conn *conn);
306 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
308 static inline void hci_conn_hold(struct hci_conn *conn)
310 atomic_inc(&conn->refcnt);
311 del_timer(&conn->disc_timer);
314 static inline void hci_conn_put(struct hci_conn *conn)
316 if (atomic_dec_and_test(&conn->refcnt)) {
318 if (conn->type == ACL_LINK) {
319 del_timer(&conn->idle_timer);
320 if (conn->state == BT_CONNECTED) {
321 timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
325 timeo = msecs_to_jiffies(10);
327 timeo = msecs_to_jiffies(10);
328 mod_timer(&conn->disc_timer, jiffies + timeo);
332 /* ----- HCI tasks ----- */
333 static inline void hci_sched_cmd(struct hci_dev *hdev)
335 tasklet_schedule(&hdev->cmd_task);
338 static inline void hci_sched_rx(struct hci_dev *hdev)
340 tasklet_schedule(&hdev->rx_task);
343 static inline void hci_sched_tx(struct hci_dev *hdev)
345 tasklet_schedule(&hdev->tx_task);
348 /* ----- HCI Devices ----- */
349 static inline void __hci_dev_put(struct hci_dev *d)
351 if (atomic_dec_and_test(&d->refcnt))
355 static inline void hci_dev_put(struct hci_dev *d)
358 module_put(d->owner);
361 static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
363 atomic_inc(&d->refcnt);
367 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
369 if (try_module_get(d->owner))
370 return __hci_dev_hold(d);
374 #define hci_dev_lock(d) spin_lock(&d->lock)
375 #define hci_dev_unlock(d) spin_unlock(&d->lock)
376 #define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
377 #define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
379 struct hci_dev *hci_dev_get(int index);
380 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
382 struct hci_dev *hci_alloc_dev(void);
383 void hci_free_dev(struct hci_dev *hdev);
384 int hci_register_dev(struct hci_dev *hdev);
385 int hci_unregister_dev(struct hci_dev *hdev);
386 int hci_suspend_dev(struct hci_dev *hdev);
387 int hci_resume_dev(struct hci_dev *hdev);
388 int hci_dev_open(__u16 dev);
389 int hci_dev_close(__u16 dev);
390 int hci_dev_reset(__u16 dev);
391 int hci_dev_reset_stat(__u16 dev);
392 int hci_dev_cmd(unsigned int cmd, void __user *arg);
393 int hci_get_dev_list(void __user *arg);
394 int hci_get_dev_info(void __user *arg);
395 int hci_get_conn_list(void __user *arg);
396 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
397 int hci_inquiry(void __user *arg);
399 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
401 /* Receive frame from HCI drivers */
402 static inline int hci_recv_frame(struct sk_buff *skb)
404 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
405 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
406 && !test_bit(HCI_INIT, &hdev->flags))) {
412 bt_cb(skb)->incoming = 1;
415 __net_timestamp(skb);
417 /* Queue frame for rx task */
418 skb_queue_tail(&hdev->rx_q, skb);
423 int hci_register_sysfs(struct hci_dev *hdev);
424 void hci_unregister_sysfs(struct hci_dev *hdev);
425 void hci_conn_add_sysfs(struct hci_conn *conn);
426 void hci_conn_del_sysfs(struct hci_conn *conn);
428 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
430 /* ----- LMP capabilities ----- */
431 #define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
432 #define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
433 #define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
434 #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
436 /* ----- HCI protocols ----- */
444 int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
445 int (*connect_cfm) (struct hci_conn *conn, __u8 status);
446 int (*disconn_ind) (struct hci_conn *conn, __u8 reason);
447 int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
448 int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
449 int (*auth_cfm) (struct hci_conn *conn, __u8 status);
450 int (*encrypt_cfm) (struct hci_conn *conn, __u8 status);
453 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
455 register struct hci_proto *hp;
458 hp = hci_proto[HCI_PROTO_L2CAP];
459 if (hp && hp->connect_ind)
460 mask |= hp->connect_ind(hdev, bdaddr, type);
462 hp = hci_proto[HCI_PROTO_SCO];
463 if (hp && hp->connect_ind)
464 mask |= hp->connect_ind(hdev, bdaddr, type);
469 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
471 register struct hci_proto *hp;
473 hp = hci_proto[HCI_PROTO_L2CAP];
474 if (hp && hp->connect_cfm)
475 hp->connect_cfm(conn, status);
477 hp = hci_proto[HCI_PROTO_SCO];
478 if (hp && hp->connect_cfm)
479 hp->connect_cfm(conn, status);
482 static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
484 register struct hci_proto *hp;
486 hp = hci_proto[HCI_PROTO_L2CAP];
487 if (hp && hp->disconn_ind)
488 hp->disconn_ind(conn, reason);
490 hp = hci_proto[HCI_PROTO_SCO];
491 if (hp && hp->disconn_ind)
492 hp->disconn_ind(conn, reason);
495 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
497 register struct hci_proto *hp;
499 hp = hci_proto[HCI_PROTO_L2CAP];
500 if (hp && hp->auth_cfm)
501 hp->auth_cfm(conn, status);
503 hp = hci_proto[HCI_PROTO_SCO];
504 if (hp && hp->auth_cfm)
505 hp->auth_cfm(conn, status);
508 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
510 register struct hci_proto *hp;
512 hp = hci_proto[HCI_PROTO_L2CAP];
513 if (hp && hp->encrypt_cfm)
514 hp->encrypt_cfm(conn, status);
516 hp = hci_proto[HCI_PROTO_SCO];
517 if (hp && hp->encrypt_cfm)
518 hp->encrypt_cfm(conn, status);
521 int hci_register_proto(struct hci_proto *hproto);
522 int hci_unregister_proto(struct hci_proto *hproto);
524 /* ----- HCI callbacks ----- */
526 struct list_head list;
530 void (*auth_cfm) (struct hci_conn *conn, __u8 status);
531 void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
532 void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
533 void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
536 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
540 hci_proto_auth_cfm(conn, status);
542 read_lock_bh(&hci_cb_list_lock);
543 list_for_each(p, &hci_cb_list) {
544 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
546 cb->auth_cfm(conn, status);
548 read_unlock_bh(&hci_cb_list_lock);
551 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
555 hci_proto_encrypt_cfm(conn, status);
557 read_lock_bh(&hci_cb_list_lock);
558 list_for_each(p, &hci_cb_list) {
559 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
561 cb->encrypt_cfm(conn, status, encrypt);
563 read_unlock_bh(&hci_cb_list_lock);
566 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
570 read_lock_bh(&hci_cb_list_lock);
571 list_for_each(p, &hci_cb_list) {
572 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
573 if (cb->key_change_cfm)
574 cb->key_change_cfm(conn, status);
576 read_unlock_bh(&hci_cb_list_lock);
579 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
583 read_lock_bh(&hci_cb_list_lock);
584 list_for_each(p, &hci_cb_list) {
585 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
586 if (cb->role_switch_cfm)
587 cb->role_switch_cfm(conn, status, role);
589 read_unlock_bh(&hci_cb_list_lock);
592 int hci_register_cb(struct hci_cb *hcb);
593 int hci_unregister_cb(struct hci_cb *hcb);
595 int hci_register_notifier(struct notifier_block *nb);
596 int hci_unregister_notifier(struct notifier_block *nb);
598 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
599 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
600 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
602 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);
604 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
606 /* ----- HCI Sockets ----- */
607 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
609 /* HCI info for socket */
610 #define hci_pi(sk) ((struct hci_pinfo *) sk)
614 struct hci_dev *hdev;
615 struct hci_filter filter;
619 /* HCI security filter */
620 #define HCI_SFLT_MAX_OGF 5
622 struct hci_sec_filter {
625 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
628 /* ----- HCI requests ----- */
629 #define HCI_REQ_DONE 0
630 #define HCI_REQ_PEND 1
631 #define HCI_REQ_CANCELED 2
633 #define hci_req_lock(d) down(&d->req_lock)
634 #define hci_req_unlock(d) up(&d->req_lock)
636 void hci_req_complete(struct hci_dev *hdev, int result);
638 #endif /* __HCI_CORE_H */