2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
58 #define VERSION "2.10"
60 static u32 l2cap_feat_mask = 0x0000;
62 static const struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
75 /* ---- L2CAP timers ---- */
76 static void l2cap_sock_timeout(unsigned long arg)
78 struct sock *sk = (struct sock *) arg;
81 BT_DBG("sock %p state %d", sk, sk->sk_state);
85 if (sk->sk_state == BT_CONNECT &&
86 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
87 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
88 reason = ECONNREFUSED;
92 __l2cap_sock_close(sk, reason);
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s) bh_lock_sock(s);
141 read_unlock(&l->lock);
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 s = __l2cap_get_chan_by_ident(l, ident);
160 if (s) bh_lock_sock(s);
161 read_unlock(&l->lock);
165 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
169 for (; cid < 0xffff; cid++) {
170 if (!__l2cap_get_chan_by_scid(l, cid))
177 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
182 l2cap_pi(l->head)->prev_c = sk;
184 l2cap_pi(sk)->next_c = l->head;
185 l2cap_pi(sk)->prev_c = NULL;
189 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
191 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
193 write_lock_bh(&l->lock);
198 l2cap_pi(next)->prev_c = prev;
200 l2cap_pi(prev)->next_c = next;
201 write_unlock_bh(&l->lock);
206 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
208 struct l2cap_chan_list *l = &conn->chan_list;
210 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
212 l2cap_pi(sk)->conn = conn;
214 if (sk->sk_type == SOCK_SEQPACKET) {
215 /* Alloc CID for connection-oriented socket */
216 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
217 } else if (sk->sk_type == SOCK_DGRAM) {
218 /* Connectionless socket */
219 l2cap_pi(sk)->scid = 0x0002;
220 l2cap_pi(sk)->dcid = 0x0002;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
223 /* Raw sockets can only send/recv signalling messages */
224 l2cap_pi(sk)->scid = 0x0001;
225 l2cap_pi(sk)->dcid = 0x0001;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
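/* CID 0x0001 (signalling) and CID 0x0002 (connectionless data) are fixed
 * channels defined by the L2CAP specification; connection-oriented channels
 * get a dynamically allocated CID starting at 0x0040. */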
229 __l2cap_chan_link(l, sk);
232 bt_accept_enqueue(parent, sk);
236 /* Delete channel. Must be called on the locked socket. */
237 static void l2cap_chan_del(struct sock *sk, int err)
239 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
240 struct sock *parent = bt_sk(sk)->parent;
242 l2cap_sock_clear_timer(sk);
244 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
247 /* Unlink from channel list */
248 l2cap_chan_unlink(&conn->chan_list, sk);
249 l2cap_pi(sk)->conn = NULL;
250 hci_conn_put(conn->hcon);
253 sk->sk_state = BT_CLOSED;
254 sock_set_flag(sk, SOCK_ZAPPED);
260 bt_accept_unlink(sk);
261 parent->sk_data_ready(parent, 0);
263 sk->sk_state_change(sk);
266 /* Service level security */
267 static inline int l2cap_check_link_mode(struct sock *sk)
269 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
271 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
272 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
273 return hci_conn_encrypt(conn->hcon);
275 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
276 return hci_conn_auth(conn->hcon);
281 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
285 /* Get next available identifier.
286 * 1 - 128 are used by the kernel.
287 * 129 - 199 are reserved.
288 * 200 - 254 are used by utilities like l2ping, etc.
291 spin_lock_bh(&conn->lock);
293 if (++conn->tx_ident > 128)
298 spin_unlock_bh(&conn->lock);
303 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
305 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
307 BT_DBG("code 0x%2.2x", code);
312 return hci_send_acl(conn->hcon, skb, 0);
315 static void l2cap_do_start(struct sock *sk)
317 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
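/* If the remote feature mask has already been requested, go ahead with the
 * connection request; otherwise request the feature mask first and let the
 * information response (or its timeout) restart pending connections. */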
319 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
320 if (l2cap_check_link_mode(sk)) {
321 struct l2cap_conn_req req;
322 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
323 req.psm = l2cap_pi(sk)->psm;
325 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
327 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
328 L2CAP_CONN_REQ, sizeof(req), &req);
331 struct l2cap_info_req req;
332 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
335 conn->info_ident = l2cap_get_ident(conn);
337 mod_timer(&conn->info_timer, jiffies +
338 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
340 l2cap_send_cmd(conn, conn->info_ident,
341 L2CAP_INFO_REQ, sizeof(req), &req);
345 /* ---- L2CAP connections ---- */
346 static void l2cap_conn_start(struct l2cap_conn *conn)
348 struct l2cap_chan_list *l = &conn->chan_list;
351 BT_DBG("conn %p", conn);
355 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
358 if (sk->sk_type != SOCK_SEQPACKET) {
363 if (sk->sk_state == BT_CONNECT) {
364 if (l2cap_check_link_mode(sk)) {
365 struct l2cap_conn_req req;
366 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
367 req.psm = l2cap_pi(sk)->psm;
369 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
371 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
372 L2CAP_CONN_REQ, sizeof(req), &req);
374 } else if (sk->sk_state == BT_CONNECT2) {
375 struct l2cap_conn_rsp rsp;
376 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
377 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
379 if (l2cap_check_link_mode(sk)) {
380 sk->sk_state = BT_CONFIG;
381 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
382 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
384 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
385 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
388 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
389 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
395 read_unlock(&l->lock);
398 static void l2cap_conn_ready(struct l2cap_conn *conn)
400 struct l2cap_chan_list *l = &conn->chan_list;
403 BT_DBG("conn %p", conn);
407 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
410 if (sk->sk_type != SOCK_SEQPACKET) {
411 l2cap_sock_clear_timer(sk);
412 sk->sk_state = BT_CONNECTED;
413 sk->sk_state_change(sk);
414 } else if (sk->sk_state == BT_CONNECT)
420 read_unlock(&l->lock);
423 /* Notify sockets that we cannot guarantee reliability anymore */
424 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
426 struct l2cap_chan_list *l = &conn->chan_list;
429 BT_DBG("conn %p", conn);
433 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
434 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
438 read_unlock(&l->lock);
441 static void l2cap_info_timeout(unsigned long arg)
443 struct l2cap_conn *conn = (void *) arg;
445 conn->info_ident = 0;
447 l2cap_conn_start(conn);
450 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
452 struct l2cap_conn *conn = hcon->l2cap_data;
457 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
461 hcon->l2cap_data = conn;
464 BT_DBG("hcon %p conn %p", hcon, conn);
466 conn->mtu = hcon->hdev->acl_mtu;
467 conn->src = &hcon->hdev->bdaddr;
468 conn->dst = &hcon->dst;
472 setup_timer(&conn->info_timer, l2cap_info_timeout,
473 (unsigned long) conn);
475 spin_lock_init(&conn->lock);
476 rwlock_init(&conn->chan_list.lock);
481 static void l2cap_conn_del(struct hci_conn *hcon, int err)
483 struct l2cap_conn *conn = hcon->l2cap_data;
489 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
492 kfree_skb(conn->rx_skb);
495 while ((sk = conn->chan_list.head)) {
497 l2cap_chan_del(sk, err);
502 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
503 del_timer_sync(&conn->info_timer);
505 hcon->l2cap_data = NULL;
509 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
511 struct l2cap_chan_list *l = &conn->chan_list;
512 write_lock_bh(&l->lock);
513 __l2cap_chan_add(conn, sk, parent);
514 write_unlock_bh(&l->lock);
517 /* ---- Socket interface ---- */
518 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
521 struct hlist_node *node;
522 sk_for_each(sk, node, &l2cap_sk_list.head)
523 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
530 /* Find socket with psm and source bdaddr.
531 * Returns closest match.
533 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
535 struct sock *sk = NULL, *sk1 = NULL;
536 struct hlist_node *node;
538 sk_for_each(sk, node, &l2cap_sk_list.head) {
539 if (state && sk->sk_state != state)
542 if (l2cap_pi(sk)->psm == psm) {
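/* Prefer an exact source address match; remember a BDADDR_ANY
 * listener as the closest match. */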
544 if (!bacmp(&bt_sk(sk)->src, src))
548 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
552 return node ? sk : sk1;
555 /* Find socket with given address (psm, src).
556 * Returns locked socket */
557 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
560 read_lock(&l2cap_sk_list.lock);
561 s = __l2cap_get_sock_by_psm(state, psm, src);
562 if (s) bh_lock_sock(s);
563 read_unlock(&l2cap_sk_list.lock);
567 static void l2cap_sock_destruct(struct sock *sk)
571 skb_queue_purge(&sk->sk_receive_queue);
572 skb_queue_purge(&sk->sk_write_queue);
575 static void l2cap_sock_cleanup_listen(struct sock *parent)
579 BT_DBG("parent %p", parent);
581 /* Close not yet accepted channels */
582 while ((sk = bt_accept_dequeue(parent, NULL)))
583 l2cap_sock_close(sk);
585 parent->sk_state = BT_CLOSED;
586 sock_set_flag(parent, SOCK_ZAPPED);
589 /* Kill socket (only if zapped and orphan)
590 * Must be called on unlocked socket.
592 static void l2cap_sock_kill(struct sock *sk)
594 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
597 BT_DBG("sk %p state %d", sk, sk->sk_state);
599 /* Kill poor orphan */
600 bt_sock_unlink(&l2cap_sk_list, sk);
601 sock_set_flag(sk, SOCK_DEAD);
605 static void __l2cap_sock_close(struct sock *sk, int reason)
607 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
609 switch (sk->sk_state) {
611 l2cap_sock_cleanup_listen(sk);
617 if (sk->sk_type == SOCK_SEQPACKET) {
618 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
619 struct l2cap_disconn_req req;
621 sk->sk_state = BT_DISCONN;
622 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
624 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
625 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
626 l2cap_send_cmd(conn, l2cap_get_ident(conn),
627 L2CAP_DISCONN_REQ, sizeof(req), &req);
629 l2cap_chan_del(sk, reason);
634 l2cap_chan_del(sk, reason);
638 sock_set_flag(sk, SOCK_ZAPPED);
643 /* Must be called on unlocked socket. */
644 static void l2cap_sock_close(struct sock *sk)
646 l2cap_sock_clear_timer(sk);
648 __l2cap_sock_close(sk, ECONNRESET);
653 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
655 struct l2cap_pinfo *pi = l2cap_pi(sk);
660 sk->sk_type = parent->sk_type;
661 pi->imtu = l2cap_pi(parent)->imtu;
662 pi->omtu = l2cap_pi(parent)->omtu;
663 pi->link_mode = l2cap_pi(parent)->link_mode;
665 pi->imtu = L2CAP_DEFAULT_MTU;
670 /* Default config options */
672 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
675 static struct proto l2cap_proto = {
677 .owner = THIS_MODULE,
678 .obj_size = sizeof(struct l2cap_pinfo)
681 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
685 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
689 sock_init_data(sock, sk);
690 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
692 sk->sk_destruct = l2cap_sock_destruct;
693 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
695 sock_reset_flag(sk, SOCK_ZAPPED);
697 sk->sk_protocol = proto;
698 sk->sk_state = BT_OPEN;
700 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
702 bt_sock_link(&l2cap_sk_list, sk);
706 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
710 BT_DBG("sock %p", sock);
712 sock->state = SS_UNCONNECTED;
714 if (sock->type != SOCK_SEQPACKET &&
715 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
716 return -ESOCKTNOSUPPORT;
718 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
721 sock->ops = &l2cap_sock_ops;
723 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
727 l2cap_sock_init(sk, NULL);
731 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
733 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
734 struct sock *sk = sock->sk;
737 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
739 if (!addr || addr->sa_family != AF_BLUETOOTH)
744 if (sk->sk_state != BT_OPEN) {
749 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
750 !capable(CAP_NET_BIND_SERVICE)) {
755 write_lock_bh(&l2cap_sk_list.lock);
757 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
760 /* Save source address */
761 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
762 l2cap_pi(sk)->psm = la->l2_psm;
763 l2cap_pi(sk)->sport = la->l2_psm;
764 sk->sk_state = BT_BOUND;
767 write_unlock_bh(&l2cap_sk_list.lock);
774 static int l2cap_do_connect(struct sock *sk)
776 bdaddr_t *src = &bt_sk(sk)->src;
777 bdaddr_t *dst = &bt_sk(sk)->dst;
778 struct l2cap_conn *conn;
779 struct hci_conn *hcon;
780 struct hci_dev *hdev;
783 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
785 if (!(hdev = hci_get_route(dst, src)))
786 return -EHOSTUNREACH;
788 hci_dev_lock_bh(hdev);
792 hcon = hci_connect(hdev, ACL_LINK, dst);
796 conn = l2cap_conn_add(hcon, 0);
804 /* Update source addr of the socket */
805 bacpy(src, conn->src);
807 l2cap_chan_add(conn, sk, NULL);
809 sk->sk_state = BT_CONNECT;
810 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
812 if (hcon->state == BT_CONNECTED) {
813 if (sk->sk_type != SOCK_SEQPACKET) {
814 l2cap_sock_clear_timer(sk);
815 sk->sk_state = BT_CONNECTED;
821 hci_dev_unlock_bh(hdev);
826 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
828 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
829 struct sock *sk = sock->sk;
836 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
841 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
846 switch(sk->sk_state) {
850 /* Already connecting */
854 /* Already connected */
867 /* Set destination address and psm */
868 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
869 l2cap_pi(sk)->psm = la->l2_psm;
871 if ((err = l2cap_do_connect(sk)))
875 err = bt_sock_wait_state(sk, BT_CONNECTED,
876 sock_sndtimeo(sk, flags & O_NONBLOCK));
882 static int l2cap_sock_listen(struct socket *sock, int backlog)
884 struct sock *sk = sock->sk;
887 BT_DBG("sk %p backlog %d", sk, backlog);
891 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
896 if (!l2cap_pi(sk)->psm) {
897 bdaddr_t *src = &bt_sk(sk)->src;
902 write_lock_bh(&l2cap_sk_list.lock);
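/* Valid PSMs are odd values; dynamically pick an unused one from the
 * 0x1001 - 0x10ff range. */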
904 for (psm = 0x1001; psm < 0x1100; psm += 2)
905 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
906 l2cap_pi(sk)->psm = htobs(psm);
907 l2cap_pi(sk)->sport = htobs(psm);
912 write_unlock_bh(&l2cap_sk_list.lock);
918 sk->sk_max_ack_backlog = backlog;
919 sk->sk_ack_backlog = 0;
920 sk->sk_state = BT_LISTEN;
927 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
929 DECLARE_WAITQUEUE(wait, current);
930 struct sock *sk = sock->sk, *nsk;
934 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
936 if (sk->sk_state != BT_LISTEN) {
941 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
943 BT_DBG("sk %p timeo %ld", sk, timeo);
945 /* Wait for an incoming connection. (wake-one). */
946 add_wait_queue_exclusive(sk->sk_sleep, &wait);
947 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
948 set_current_state(TASK_INTERRUPTIBLE);
955 timeo = schedule_timeout(timeo);
956 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
958 if (sk->sk_state != BT_LISTEN) {
963 if (signal_pending(current)) {
964 err = sock_intr_errno(timeo);
968 set_current_state(TASK_RUNNING);
969 remove_wait_queue(sk->sk_sleep, &wait);
974 newsock->state = SS_CONNECTED;
976 BT_DBG("new socket %p", nsk);
983 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
985 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
986 struct sock *sk = sock->sk;
988 BT_DBG("sock %p, sk %p", sock, sk);
990 addr->sa_family = AF_BLUETOOTH;
991 *len = sizeof(struct sockaddr_l2);
994 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
996 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
998 la->l2_psm = l2cap_pi(sk)->psm;
1002 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1004 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1005 struct sk_buff *skb, **frag;
1006 int err, hlen, count, sent = 0;
1007 struct l2cap_hdr *lh;
1009 BT_DBG("sk %p len %d", sk, len);
1011 /* First fragment (with L2CAP header) */
1012 if (sk->sk_type == SOCK_DGRAM)
1013 hlen = L2CAP_HDR_SIZE + 2;
1015 hlen = L2CAP_HDR_SIZE;
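/* (for SOCK_DGRAM the two extra header bytes carry the PSM, written below) */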
1017 count = min_t(unsigned int, (conn->mtu - hlen), len);
1019 skb = bt_skb_send_alloc(sk, hlen + count,
1020 msg->msg_flags & MSG_DONTWAIT, &err);
1024 /* Create L2CAP header */
1025 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1026 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1027 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1029 if (sk->sk_type == SOCK_DGRAM)
1030 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1032 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1040 /* Continuation fragments (no L2CAP header) */
1041 frag = &skb_shinfo(skb)->frag_list;
1043 count = min_t(unsigned int, conn->mtu, len);
1045 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1049 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1057 frag = &(*frag)->next;
1060 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1070 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1072 struct sock *sk = sock->sk;
1075 BT_DBG("sock %p, sk %p", sock, sk);
1077 err = sock_error(sk);
1081 if (msg->msg_flags & MSG_OOB)
1084 /* Check outgoing MTU */
1085 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1090 if (sk->sk_state == BT_CONNECTED)
1091 err = l2cap_do_send(sk, msg, len);
1099 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1101 struct sock *sk = sock->sk;
1102 struct l2cap_options opts;
1106 BT_DBG("sk %p", sk);
1112 opts.imtu = l2cap_pi(sk)->imtu;
1113 opts.omtu = l2cap_pi(sk)->omtu;
1114 opts.flush_to = l2cap_pi(sk)->flush_to;
1115 opts.mode = L2CAP_MODE_BASIC;
1117 len = min_t(unsigned int, sizeof(opts), optlen);
1118 if (copy_from_user((char *) &opts, optval, len)) {
1123 l2cap_pi(sk)->imtu = opts.imtu;
1124 l2cap_pi(sk)->omtu = opts.omtu;
1128 if (get_user(opt, (u32 __user *) optval)) {
1133 l2cap_pi(sk)->link_mode = opt;
1145 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1147 struct sock *sk = sock->sk;
1148 struct l2cap_options opts;
1149 struct l2cap_conninfo cinfo;
1152 BT_DBG("sk %p", sk);
1154 if (get_user(len, optlen))
1161 opts.imtu = l2cap_pi(sk)->imtu;
1162 opts.omtu = l2cap_pi(sk)->omtu;
1163 opts.flush_to = l2cap_pi(sk)->flush_to;
1164 opts.mode = L2CAP_MODE_BASIC;
1166 len = min_t(unsigned int, len, sizeof(opts));
1167 if (copy_to_user(optval, (char *) &opts, len))
1173 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1177 case L2CAP_CONNINFO:
1178 if (sk->sk_state != BT_CONNECTED) {
1183 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1184 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1186 len = min_t(unsigned int, len, sizeof(cinfo));
1187 if (copy_to_user(optval, (char *) &cinfo, len))
1201 static int l2cap_sock_shutdown(struct socket *sock, int how)
1203 struct sock *sk = sock->sk;
1206 BT_DBG("sock %p, sk %p", sock, sk);
1212 if (!sk->sk_shutdown) {
1213 sk->sk_shutdown = SHUTDOWN_MASK;
1214 l2cap_sock_clear_timer(sk);
1215 __l2cap_sock_close(sk, 0);
1217 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1218 err = bt_sock_wait_state(sk, BT_CLOSED,
1225 static int l2cap_sock_release(struct socket *sock)
1227 struct sock *sk = sock->sk;
1230 BT_DBG("sock %p, sk %p", sock, sk);
1235 err = l2cap_sock_shutdown(sock, 2);
1238 l2cap_sock_kill(sk);
1242 static void l2cap_chan_ready(struct sock *sk)
1244 struct sock *parent = bt_sk(sk)->parent;
1246 BT_DBG("sk %p, parent %p", sk, parent);
1248 l2cap_pi(sk)->conf_state = 0;
1249 l2cap_sock_clear_timer(sk);
1252 /* Outgoing channel.
1253 * Wake up socket sleeping on connect.
1255 sk->sk_state = BT_CONNECTED;
1256 sk->sk_state_change(sk);
1258 /* Incoming channel.
1259 * Wake up socket sleeping on accept.
1261 parent->sk_data_ready(parent, 0);
1264 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
1265 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1266 hci_conn_change_link_key(conn->hcon);
1270 /* Copy frame to all raw sockets on that connection */
1271 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1273 struct l2cap_chan_list *l = &conn->chan_list;
1274 struct sk_buff *nskb;
1277 BT_DBG("conn %p", conn);
1279 read_lock(&l->lock);
1280 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1281 if (sk->sk_type != SOCK_RAW)
1284 /* Don't send frame to the socket it came from */
1288 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1291 if (sock_queue_rcv_skb(sk, nskb))
1294 read_unlock(&l->lock);
1297 /* ---- L2CAP signalling commands ---- */
1298 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1299 u8 code, u8 ident, u16 dlen, void *data)
1301 struct sk_buff *skb, **frag;
1302 struct l2cap_cmd_hdr *cmd;
1303 struct l2cap_hdr *lh;
1306 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1308 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1309 count = min_t(unsigned int, conn->mtu, len);
1311 skb = bt_skb_alloc(count, GFP_ATOMIC);
1315 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1316 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1317 lh->cid = cpu_to_le16(0x0001);
1319 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1322 cmd->len = cpu_to_le16(dlen);
1325 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1326 memcpy(skb_put(skb, count), data, count);
1332 /* Continuation fragments (no L2CAP header) */
1333 frag = &skb_shinfo(skb)->frag_list;
1335 count = min_t(unsigned int, conn->mtu, len);
1337 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1341 memcpy(skb_put(*frag, count), data, count);
1346 frag = &(*frag)->next;
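/* Configuration options are encoded as a type byte, a length byte and a
 * value of 'length' bytes; L2CAP_CONF_OPT_SIZE covers the two header bytes. */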
1356 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1358 struct l2cap_conf_opt *opt = *ptr;
1361 len = L2CAP_CONF_OPT_SIZE + opt->len;
1369 *val = *((u8 *) opt->val);
1373 *val = __le16_to_cpu(*((__le16 *) opt->val));
1377 *val = __le32_to_cpu(*((__le32 *) opt->val));
1381 *val = (unsigned long) opt->val;
1385 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1389 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1391 struct l2cap_conf_opt *opt = *ptr;
1393 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1400 *((u8 *) opt->val) = val;
1404 *((__le16 *) opt->val) = cpu_to_le16(val);
1408 *((__le32 *) opt->val) = cpu_to_le32(val);
1412 memcpy(opt->val, (void *) val, len);
1416 *ptr += L2CAP_CONF_OPT_SIZE + len;
1419 static int l2cap_build_conf_req(struct sock *sk, void *data)
1421 struct l2cap_pinfo *pi = l2cap_pi(sk);
1422 struct l2cap_conf_req *req = data;
1423 void *ptr = req->data;
1425 BT_DBG("sk %p", sk);
1427 if (pi->imtu != L2CAP_DEFAULT_MTU)
1428 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1430 /* FIXME: Need actual value of the flush timeout */
1431 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1432 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1434 req->dcid = cpu_to_le16(pi->dcid);
1435 req->flags = cpu_to_le16(0);
1440 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1442 struct l2cap_pinfo *pi = l2cap_pi(sk);
1443 struct l2cap_conf_rsp *rsp = data;
1444 void *ptr = rsp->data;
1445 void *req = pi->conf_req;
1446 int len = pi->conf_len;
1447 int type, hint, olen;
1449 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1450 u16 mtu = L2CAP_DEFAULT_MTU;
1451 u16 result = L2CAP_CONF_SUCCESS;
1453 BT_DBG("sk %p", sk);
1455 while (len >= L2CAP_CONF_OPT_SIZE) {
1456 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1462 case L2CAP_CONF_MTU:
1466 case L2CAP_CONF_FLUSH_TO:
1470 case L2CAP_CONF_QOS:
1473 case L2CAP_CONF_RFC:
1474 if (olen == sizeof(rfc))
1475 memcpy(&rfc, (void *) val, olen);
1482 result = L2CAP_CONF_UNKNOWN;
1483 *((u8 *) ptr++) = type;
1488 if (result == L2CAP_CONF_SUCCESS) {
1489 /* Configure output options and let the other side know
1490 * which ones we don't like. */
1492 if (rfc.mode == L2CAP_MODE_BASIC) {
1494 result = L2CAP_CONF_UNACCEPT;
1497 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1500 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1502 result = L2CAP_CONF_UNACCEPT;
1504 memset(&rfc, 0, sizeof(rfc));
1505 rfc.mode = L2CAP_MODE_BASIC;
1507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1508 sizeof(rfc), (unsigned long) &rfc);
1512 rsp->scid = cpu_to_le16(pi->dcid);
1513 rsp->result = cpu_to_le16(result);
1514 rsp->flags = cpu_to_le16(0x0000);
1519 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1521 struct l2cap_conf_rsp *rsp = data;
1522 void *ptr = rsp->data;
1524 BT_DBG("sk %p", sk);
1526 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1527 rsp->result = cpu_to_le16(result);
1528 rsp->flags = cpu_to_le16(flags);
1533 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1535 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1537 if (rej->reason != 0x0000)
1540 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1541 cmd->ident == conn->info_ident) {
1542 conn->info_ident = 0;
1543 del_timer(&conn->info_timer);
1544 l2cap_conn_start(conn);
1550 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1552 struct l2cap_chan_list *list = &conn->chan_list;
1553 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1554 struct l2cap_conn_rsp rsp;
1555 struct sock *sk, *parent;
1556 int result, status = 0;
1558 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1559 __le16 psm = req->psm;
1561 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1563 /* Check if we have a socket listening on the psm */
1564 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1566 result = L2CAP_CR_BAD_PSM;
1570 result = L2CAP_CR_NO_MEM;
1572 /* Check for backlog size */
1573 if (sk_acceptq_is_full(parent)) {
1574 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1578 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1582 write_lock_bh(&list->lock);
1584 /* Check if we already have a channel with that dcid */
1585 if (__l2cap_get_chan_by_dcid(list, scid)) {
1586 write_unlock_bh(&list->lock);
1587 sock_set_flag(sk, SOCK_ZAPPED);
1588 l2cap_sock_kill(sk);
1592 hci_conn_hold(conn->hcon);
1594 l2cap_sock_init(sk, parent);
1595 bacpy(&bt_sk(sk)->src, conn->src);
1596 bacpy(&bt_sk(sk)->dst, conn->dst);
1597 l2cap_pi(sk)->psm = psm;
1598 l2cap_pi(sk)->dcid = scid;
1600 __l2cap_chan_add(conn, sk, parent);
1601 dcid = l2cap_pi(sk)->scid;
1603 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1605 l2cap_pi(sk)->ident = cmd->ident;
1607 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1608 if (l2cap_check_link_mode(sk)) {
1609 sk->sk_state = BT_CONFIG;
1610 result = L2CAP_CR_SUCCESS;
1611 status = L2CAP_CS_NO_INFO;
1613 sk->sk_state = BT_CONNECT2;
1614 result = L2CAP_CR_PEND;
1615 status = L2CAP_CS_AUTHEN_PEND;
1618 sk->sk_state = BT_CONNECT2;
1619 result = L2CAP_CR_PEND;
1620 status = L2CAP_CS_NO_INFO;
1623 write_unlock_bh(&list->lock);
1626 bh_unlock_sock(parent);
1629 rsp.scid = cpu_to_le16(scid);
1630 rsp.dcid = cpu_to_le16(dcid);
1631 rsp.result = cpu_to_le16(result);
1632 rsp.status = cpu_to_le16(status);
1633 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1635 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1636 struct l2cap_info_req info;
1637 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1639 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1640 conn->info_ident = l2cap_get_ident(conn);
1642 mod_timer(&conn->info_timer, jiffies +
1643 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1645 l2cap_send_cmd(conn, conn->info_ident,
1646 L2CAP_INFO_REQ, sizeof(info), &info);
1652 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1654 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1655 u16 scid, dcid, result, status;
1659 scid = __le16_to_cpu(rsp->scid);
1660 dcid = __le16_to_cpu(rsp->dcid);
1661 result = __le16_to_cpu(rsp->result);
1662 status = __le16_to_cpu(rsp->status);
1664 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1667 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1670 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1675 case L2CAP_CR_SUCCESS:
1676 sk->sk_state = BT_CONFIG;
1677 l2cap_pi(sk)->ident = 0;
1678 l2cap_pi(sk)->dcid = dcid;
1679 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1681 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1682 l2cap_build_conf_req(sk, req), req);
1689 l2cap_chan_del(sk, ECONNREFUSED);
1697 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1699 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1705 dcid = __le16_to_cpu(req->dcid);
1706 flags = __le16_to_cpu(req->flags);
1708 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1710 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1713 if (sk->sk_state == BT_DISCONN)
1716 /* Reject if config buffer is too small. */
1717 len = cmd_len - sizeof(*req);
1718 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1719 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1720 l2cap_build_conf_rsp(sk, rsp,
1721 L2CAP_CONF_REJECT, flags), rsp);
1726 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1727 l2cap_pi(sk)->conf_len += len;
1729 if (flags & 0x0001) {
1730 /* Incomplete config. Send empty response. */
1731 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1732 l2cap_build_conf_rsp(sk, rsp,
1733 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1737 /* Complete config. */
1738 len = l2cap_parse_conf_req(sk, rsp);
1742 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1744 /* Reset config buffer. */
1745 l2cap_pi(sk)->conf_len = 0;
1747 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1750 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1751 sk->sk_state = BT_CONNECTED;
1752 l2cap_chan_ready(sk);
1756 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1758 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1759 l2cap_build_conf_req(sk, buf), buf);
1767 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1769 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1770 u16 scid, flags, result;
1773 scid = __le16_to_cpu(rsp->scid);
1774 flags = __le16_to_cpu(rsp->flags);
1775 result = __le16_to_cpu(rsp->result);
1777 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1779 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1783 case L2CAP_CONF_SUCCESS:
1786 case L2CAP_CONF_UNACCEPT:
1787 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1789 /* It does not make sense to adjust L2CAP parameters
1790 * that are currently defined in the spec. We simply
1791 * resend the config request that we sent earlier. It is
1792 * stupid, but it helps qualification testing which
1793 * expects at least some response from us. */
1794 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1795 l2cap_build_conf_req(sk, req), req);
1800 sk->sk_state = BT_DISCONN;
1801 sk->sk_err = ECONNRESET;
1802 l2cap_sock_set_timer(sk, HZ * 5);
1804 struct l2cap_disconn_req req;
1805 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1806 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1807 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1808 L2CAP_DISCONN_REQ, sizeof(req), &req);
1816 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1818 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1819 sk->sk_state = BT_CONNECTED;
1820 l2cap_chan_ready(sk);
1828 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1830 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1831 struct l2cap_disconn_rsp rsp;
1835 scid = __le16_to_cpu(req->scid);
1836 dcid = __le16_to_cpu(req->dcid);
1838 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1840 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1843 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1844 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1845 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1847 sk->sk_shutdown = SHUTDOWN_MASK;
1849 l2cap_chan_del(sk, ECONNRESET);
1852 l2cap_sock_kill(sk);
1856 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1858 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1862 scid = __le16_to_cpu(rsp->scid);
1863 dcid = __le16_to_cpu(rsp->dcid);
1865 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1867 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1870 l2cap_chan_del(sk, 0);
1873 l2cap_sock_kill(sk);
1877 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1879 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1882 type = __le16_to_cpu(req->type);
1884 BT_DBG("type 0x%4.4x", type);
1886 if (type == L2CAP_IT_FEAT_MASK) {
1888 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1889 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1890 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1891 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1892 l2cap_send_cmd(conn, cmd->ident,
1893 L2CAP_INFO_RSP, sizeof(buf), buf);
1895 struct l2cap_info_rsp rsp;
1896 rsp.type = cpu_to_le16(type);
1897 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1898 l2cap_send_cmd(conn, cmd->ident,
1899 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1905 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1907 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1910 type = __le16_to_cpu(rsp->type);
1911 result = __le16_to_cpu(rsp->result);
1913 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1915 conn->info_ident = 0;
1917 del_timer(&conn->info_timer);
1919 if (type == L2CAP_IT_FEAT_MASK)
1920 conn->feat_mask = get_unaligned_le32(rsp->data);
1922 l2cap_conn_start(conn);
1927 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1929 u8 *data = skb->data;
1931 struct l2cap_cmd_hdr cmd;
1934 l2cap_raw_recv(conn, skb);
1936 while (len >= L2CAP_CMD_HDR_SIZE) {
1938 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1939 data += L2CAP_CMD_HDR_SIZE;
1940 len -= L2CAP_CMD_HDR_SIZE;
1942 cmd_len = le16_to_cpu(cmd.len);
1944 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1946 if (cmd_len > len || !cmd.ident) {
1947 BT_DBG("corrupted command");
1952 case L2CAP_COMMAND_REJ:
1953 l2cap_command_rej(conn, &cmd, data);
1956 case L2CAP_CONN_REQ:
1957 err = l2cap_connect_req(conn, &cmd, data);
1960 case L2CAP_CONN_RSP:
1961 err = l2cap_connect_rsp(conn, &cmd, data);
1964 case L2CAP_CONF_REQ:
1965 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1968 case L2CAP_CONF_RSP:
1969 err = l2cap_config_rsp(conn, &cmd, data);
1972 case L2CAP_DISCONN_REQ:
1973 err = l2cap_disconnect_req(conn, &cmd, data);
1976 case L2CAP_DISCONN_RSP:
1977 err = l2cap_disconnect_rsp(conn, &cmd, data);
1980 case L2CAP_ECHO_REQ:
1981 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1984 case L2CAP_ECHO_RSP:
1987 case L2CAP_INFO_REQ:
1988 err = l2cap_information_req(conn, &cmd, data);
1991 case L2CAP_INFO_RSP:
1992 err = l2cap_information_rsp(conn, &cmd, data);
1996 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2002 struct l2cap_cmd_rej rej;
2003 BT_DBG("error %d", err);
2005 /* FIXME: Map err to a valid reason */
2006 rej.reason = cpu_to_le16(0);
2007 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2017 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2021 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2023 BT_DBG("unknown cid 0x%4.4x", cid);
2027 BT_DBG("sk %p, len %d", sk, skb->len);
2029 if (sk->sk_state != BT_CONNECTED)
2032 if (l2cap_pi(sk)->imtu < skb->len)
2035 /* If the socket receive buffer overflows we drop data here,
2036 * which is *bad* because L2CAP has to be reliable.
2037 * But we don't have any other choice: L2CAP doesn't
2038 * provide a flow control mechanism. */
2040 if (!sock_queue_rcv_skb(sk, skb))
2053 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2057 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2061 BT_DBG("sk %p, len %d", sk, skb->len);
2063 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2066 if (l2cap_pi(sk)->imtu < skb->len)
2069 if (!sock_queue_rcv_skb(sk, skb))
2076 if (sk) bh_unlock_sock(sk);
2080 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2082 struct l2cap_hdr *lh = (void *) skb->data;
2086 skb_pull(skb, L2CAP_HDR_SIZE);
2087 cid = __le16_to_cpu(lh->cid);
2088 len = __le16_to_cpu(lh->len);
2090 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2094 l2cap_sig_channel(conn, skb);
2098 psm = get_unaligned((__le16 *) skb->data);
2100 l2cap_conless_channel(conn, psm, skb);
2104 l2cap_data_channel(conn, cid, skb);
2109 /* ---- L2CAP interface with lower layer (HCI) ---- */
2111 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2113 int exact = 0, lm1 = 0, lm2 = 0;
2114 register struct sock *sk;
2115 struct hlist_node *node;
2117 if (type != ACL_LINK)
2120 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2122 /* Find listening sockets and check their link_mode */
2123 read_lock(&l2cap_sk_list.lock);
2124 sk_for_each(sk, node, &l2cap_sk_list.head) {
2125 if (sk->sk_state != BT_LISTEN)
2128 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2129 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2131 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2132 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2134 read_unlock(&l2cap_sk_list.lock);
2136 return exact ? lm1 : lm2;
2139 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2141 struct l2cap_conn *conn;
2143 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2145 if (hcon->type != ACL_LINK)
2149 conn = l2cap_conn_add(hcon, status);
2151 l2cap_conn_ready(conn);
2153 l2cap_conn_del(hcon, bt_err(status));
2158 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2160 BT_DBG("hcon %p reason %d", hcon, reason);
2162 if (hcon->type != ACL_LINK)
2165 l2cap_conn_del(hcon, bt_err(reason));
2170 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2172 struct l2cap_chan_list *l;
2173 struct l2cap_conn *conn = hcon->l2cap_data;
2179 l = &conn->chan_list;
2181 BT_DBG("conn %p", conn);
2183 read_lock(&l->lock);
2185 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2186 struct l2cap_pinfo *pi = l2cap_pi(sk);
2190 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2191 !(hcon->link_mode & HCI_LM_ENCRYPT) &&
2197 if (sk->sk_state == BT_CONNECT) {
2199 struct l2cap_conn_req req;
2200 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2201 req.psm = l2cap_pi(sk)->psm;
2203 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2205 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2206 L2CAP_CONN_REQ, sizeof(req), &req);
2208 l2cap_sock_clear_timer(sk);
2209 l2cap_sock_set_timer(sk, HZ / 10);
2211 } else if (sk->sk_state == BT_CONNECT2) {
2212 struct l2cap_conn_rsp rsp;
2216 sk->sk_state = BT_CONFIG;
2217 result = L2CAP_CR_SUCCESS;
2219 sk->sk_state = BT_DISCONN;
2220 l2cap_sock_set_timer(sk, HZ / 10);
2221 result = L2CAP_CR_SEC_BLOCK;
2224 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2225 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2226 rsp.result = cpu_to_le16(result);
2227 rsp.status = cpu_to_le16(0);
2228 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2229 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2235 read_unlock(&l->lock);
2240 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2242 struct l2cap_chan_list *l;
2243 struct l2cap_conn *conn = hcon->l2cap_data;
2249 l = &conn->chan_list;
2251 BT_DBG("conn %p", conn);
2253 read_lock(&l->lock);
2255 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2256 struct l2cap_pinfo *pi = l2cap_pi(sk);
2260 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2261 (sk->sk_state == BT_CONNECTED ||
2262 sk->sk_state == BT_CONFIG) &&
2263 !status && encrypt == 0x00) {
2264 __l2cap_sock_close(sk, ECONNREFUSED);
2269 if (sk->sk_state == BT_CONNECT) {
2271 struct l2cap_conn_req req;
2272 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2273 req.psm = l2cap_pi(sk)->psm;
2275 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2277 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2278 L2CAP_CONN_REQ, sizeof(req), &req);
2280 l2cap_sock_clear_timer(sk);
2281 l2cap_sock_set_timer(sk, HZ / 10);
2283 } else if (sk->sk_state == BT_CONNECT2) {
2284 struct l2cap_conn_rsp rsp;
2288 sk->sk_state = BT_CONFIG;
2289 result = L2CAP_CR_SUCCESS;
2291 sk->sk_state = BT_DISCONN;
2292 l2cap_sock_set_timer(sk, HZ / 10);
2293 result = L2CAP_CR_SEC_BLOCK;
2296 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2297 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2298 rsp.result = cpu_to_le16(result);
2299 rsp.status = cpu_to_le16(0);
2300 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2301 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2307 read_unlock(&l->lock);
2312 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2314 struct l2cap_conn *conn = hcon->l2cap_data;
2316 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2319 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2321 if (flags & ACL_START) {
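/* Start fragment: it must carry at least a complete L2CAP header, whose
 * length field tells us how large the reassembled frame will be. */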
2322 struct l2cap_hdr *hdr;
2326 BT_ERR("Unexpected start frame (len %d)", skb->len);
2327 kfree_skb(conn->rx_skb);
2328 conn->rx_skb = NULL;
2330 l2cap_conn_unreliable(conn, ECOMM);
2334 BT_ERR("Frame is too short (len %d)", skb->len);
2335 l2cap_conn_unreliable(conn, ECOMM);
2339 hdr = (struct l2cap_hdr *) skb->data;
2340 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2342 if (len == skb->len) {
2343 /* Complete frame received */
2344 l2cap_recv_frame(conn, skb);
2348 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2350 if (skb->len > len) {
2351 BT_ERR("Frame is too long (len %d, expected len %d)",
2353 l2cap_conn_unreliable(conn, ECOMM);
2357 /* Allocate skb for the complete frame (with header) */
2358 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2361 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2363 conn->rx_len = len - skb->len;
2365 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2367 if (!conn->rx_len) {
2368 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2369 l2cap_conn_unreliable(conn, ECOMM);
2373 if (skb->len > conn->rx_len) {
2374 BT_ERR("Fragment is too long (len %d, expected %d)",
2375 skb->len, conn->rx_len);
2376 kfree_skb(conn->rx_skb);
2377 conn->rx_skb = NULL;
2379 l2cap_conn_unreliable(conn, ECOMM);
2383 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2385 conn->rx_len -= skb->len;
2387 if (!conn->rx_len) {
2388 /* Complete frame received */
2389 l2cap_recv_frame(conn, conn->rx_skb);
2390 conn->rx_skb = NULL;
2399 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2402 struct hlist_node *node;
2405 read_lock_bh(&l2cap_sk_list.lock);
2407 sk_for_each(sk, node, &l2cap_sk_list.head) {
2408 struct l2cap_pinfo *pi = l2cap_pi(sk);
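/* one line per socket: src dst state psm scid dcid imtu omtu link_mode */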
2410 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2411 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2412 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2413 pi->imtu, pi->omtu, pi->link_mode);
2416 read_unlock_bh(&l2cap_sk_list.lock);
2421 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2423 static const struct proto_ops l2cap_sock_ops = {
2424 .family = PF_BLUETOOTH,
2425 .owner = THIS_MODULE,
2426 .release = l2cap_sock_release,
2427 .bind = l2cap_sock_bind,
2428 .connect = l2cap_sock_connect,
2429 .listen = l2cap_sock_listen,
2430 .accept = l2cap_sock_accept,
2431 .getname = l2cap_sock_getname,
2432 .sendmsg = l2cap_sock_sendmsg,
2433 .recvmsg = bt_sock_recvmsg,
2434 .poll = bt_sock_poll,
2435 .ioctl = bt_sock_ioctl,
2436 .mmap = sock_no_mmap,
2437 .socketpair = sock_no_socketpair,
2438 .shutdown = l2cap_sock_shutdown,
2439 .setsockopt = l2cap_sock_setsockopt,
2440 .getsockopt = l2cap_sock_getsockopt
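/*
 * A minimal user-space sketch (not part of this module) showing how these
 * operations are exercised through a BTPROTO_L2CAP socket; the PSM and
 * remote address below are hypothetical examples.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static int l2cap_connect_example(void)
{
	struct sockaddr_l2 addr;
	int sk;

	/* SOCK_SEQPACKET gives a connection-oriented L2CAP channel */
	sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(0x1001);			/* example PSM */
	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);	/* example address */

	if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}

	return sk;
}
#endif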
2443 static struct net_proto_family l2cap_sock_family_ops = {
2444 .family = PF_BLUETOOTH,
2445 .owner = THIS_MODULE,
2446 .create = l2cap_sock_create,
2449 static struct hci_proto l2cap_hci_proto = {
2451 .id = HCI_PROTO_L2CAP,
2452 .connect_ind = l2cap_connect_ind,
2453 .connect_cfm = l2cap_connect_cfm,
2454 .disconn_ind = l2cap_disconn_ind,
2455 .auth_cfm = l2cap_auth_cfm,
2456 .encrypt_cfm = l2cap_encrypt_cfm,
2457 .recv_acldata = l2cap_recv_acldata
2460 static int __init l2cap_init(void)
2464 err = proto_register(&l2cap_proto, 0);
2468 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2470 BT_ERR("L2CAP socket registration failed");
2474 err = hci_register_proto(&l2cap_hci_proto);
2476 BT_ERR("L2CAP protocol registration failed");
2477 bt_sock_unregister(BTPROTO_L2CAP);
2481 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2482 BT_ERR("Failed to create L2CAP info file");
2484 BT_INFO("L2CAP ver %s", VERSION);
2485 BT_INFO("L2CAP socket layer initialized");
2490 proto_unregister(&l2cap_proto);
2494 static void __exit l2cap_exit(void)
2496 class_remove_file(bt_class, &class_attr_l2cap);
2498 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2499 BT_ERR("L2CAP socket unregistration failed");
2501 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2502 BT_ERR("L2CAP protocol unregistration failed");
2504 proto_unregister(&l2cap_proto);
2507 void l2cap_load(void)
2509 /* Dummy function to trigger automatic L2CAP module loading by
2510 * other modules that use L2CAP sockets but don't use any other
2511 * symbols from it. */
2514 EXPORT_SYMBOL(l2cap_load);
2516 module_init(l2cap_init);
2517 module_exit(l2cap_exit);
2519 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2520 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2521 MODULE_VERSION(VERSION);
2522 MODULE_LICENSE("GPL");
2523 MODULE_ALIAS("bt-proto-0");