BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
/* Bluetooth L2CAP core and sockets. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#ifndef CONFIG_BT_L2CAP_DEBUG
static u32 l2cap_feat_mask = 0x0000;
static const struct proto_ops l2cap_sock_ops;
static struct bt_sock_list l2cap_sk_list = {
.lock = RW_LOCK_UNLOCKED
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
struct sock *sk = (struct sock *) arg;
BT_DBG("sock %p state %d", sk, sk->sk_state);
__l2cap_sock_close(sk, ETIMEDOUT);
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
static void l2cap_sock_clear_timer(struct sock *sk)
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->dcid == cid)
static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->scid == cid)
/* Find channel with given SCID.
* Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
s = __l2cap_get_chan_by_scid(l, cid);
if (s) bh_lock_sock(s);
read_unlock(&l->lock);
static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->ident == ident)
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
s = __l2cap_get_chan_by_ident(l, ident);
if (s) bh_lock_sock(s);
read_unlock(&l->lock);
static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
for (; cid < 0xffff; cid++) {
if (!__l2cap_get_chan_by_scid(l, cid))
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
l2cap_pi(l->head)->prev_c = sk;
l2cap_pi(sk)->next_c = l->head;
l2cap_pi(sk)->prev_c = NULL;
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
write_lock_bh(&l->lock);
l2cap_pi(next)->prev_c = prev;
l2cap_pi(prev)->next_c = next;
write_unlock_bh(&l->lock);
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
l2cap_pi(sk)->conn = conn;
if (sk->sk_type == SOCK_SEQPACKET) {
/* Alloc CID for connection-oriented socket */
l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
} else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
l2cap_pi(sk)->scid = 0x0002;
l2cap_pi(sk)->dcid = 0x0002;
l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* Raw socket can send/recv signalling messages only */
l2cap_pi(sk)->scid = 0x0001;
l2cap_pi(sk)->dcid = 0x0001;
l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
__l2cap_chan_link(l, sk);
bt_accept_enqueue(parent, sk);
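/*
 * Illustrative summary (an assumption added for clarity, not taken from the
 * original sources): the fixed CIDs assigned above match the reserved
 * channel identifiers of the L2CAP specification, while SOCK_SEQPACKET
 * channels get a dynamically allocated CID from l2cap_alloc_cid():
 *
 *	0x0001   signalling channel       (SOCK_RAW)
 *	0x0002   connectionless data      (SOCK_DGRAM)
 *	0x0040+  dynamically allocated    (SOCK_SEQPACKET)
 */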
* Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sock *parent = bt_sk(sk)->parent;
l2cap_sock_clear_timer(sk);
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
/* Unlink from channel list */
l2cap_chan_unlink(&conn->chan_list, sk);
l2cap_pi(sk)->conn = NULL;
hci_conn_put(conn->hcon);
sk->sk_state = BT_CLOSED;
sock_set_flag(sk, SOCK_ZAPPED);
bt_accept_unlink(sk);
parent->sk_data_ready(parent, 0);
sk->sk_state_change(sk);
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
/* Get next available identifier.
* 1 - 128 are used by the kernel.
* 129 - 199 are reserved.
* 200 - 254 are used by utilities like l2ping, etc.
spin_lock_bh(&conn->lock);
if (++conn->tx_ident > 128)
spin_unlock_bh(&conn->lock);
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
BT_DBG("code 0x%2.2x", code);
return hci_send_acl(conn->hcon, skb, 0);
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p", conn);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_type != SOCK_SEQPACKET) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
} else if (sk->sk_state == BT_CONNECT) {
struct l2cap_conn_req req;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
read_unlock(&l->lock);
static void l2cap_conn_ready(struct l2cap_conn *conn)
BT_DBG("conn %p", conn);
if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
struct l2cap_info_req req;
req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
mod_timer(&conn->info_timer,
jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(req), &req);
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p", conn);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
read_unlock(&l->lock);
static void l2cap_info_timeout(unsigned long arg)
struct l2cap_conn *conn = (void *) arg;
conn->info_ident = 0;
l2cap_conn_start(conn);
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
struct l2cap_conn *conn = hcon->l2cap_data;
conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
hcon->l2cap_data = conn;
BT_DBG("hcon %p conn %p", hcon, conn);
conn->mtu = hcon->hdev->acl_mtu;
conn->src = &hcon->hdev->bdaddr;
conn->dst = &hcon->dst;
setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long)conn);
spin_lock_init(&conn->lock);
rwlock_init(&conn->chan_list.lock);
static void l2cap_conn_del(struct hci_conn *hcon, int err)
struct l2cap_conn *conn = hcon->l2cap_data;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
kfree_skb(conn->rx_skb);
while ((sk = conn->chan_list.head)) {
l2cap_chan_del(sk, err);
del_timer_sync(&conn->info_timer);
hcon->l2cap_data = NULL;
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
struct l2cap_chan_list *l = &conn->chan_list;
write_lock_bh(&l->lock);
__l2cap_chan_add(conn, sk, parent);
write_unlock_bh(&l->lock);
/* ---- Socket interface ---- */
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
struct hlist_node *node;
sk_for_each(sk, node, &l2cap_sk_list.head)
if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
/* Find socket with psm and source bdaddr.
* Returns closest match.
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
sk_for_each(sk, node, &l2cap_sk_list.head) {
if (state && sk->sk_state != state)
if (l2cap_pi(sk)->psm == psm) {
if (!bacmp(&bt_sk(sk)->src, src))
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
return node ? sk : sk1;
/* Find socket with given address (psm, src).
* Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
read_lock(&l2cap_sk_list.lock);
s = __l2cap_get_sock_by_psm(state, psm, src);
if (s) bh_lock_sock(s);
read_unlock(&l2cap_sk_list.lock);
static void l2cap_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
static void l2cap_sock_cleanup_listen(struct sock *parent)
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL)))
l2cap_sock_close(sk);
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
static void l2cap_sock_kill(struct sock *sk)
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&l2cap_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
static void __l2cap_sock_close(struct sock *sk, int reason)
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
l2cap_sock_cleanup_listen(sk);
if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct l2cap_disconn_req req;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
l2cap_chan_del(sk, reason);
l2cap_chan_del(sk, reason);
sock_set_flag(sk, SOCK_ZAPPED);
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, ECONNRESET);
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_pinfo *pi = l2cap_pi(sk);
sk->sk_type = parent->sk_type;
pi->imtu = l2cap_pi(parent)->imtu;
pi->omtu = l2cap_pi(parent)->omtu;
pi->link_mode = l2cap_pi(parent)->link_mode;
pi->imtu = L2CAP_DEFAULT_MTU;
/* Default config options */
pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
static struct proto l2cap_proto = {
.owner = THIS_MODULE,
.obj_size = sizeof(struct l2cap_pinfo)
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = l2cap_sock_destruct;
sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long)sk);
bt_sock_link(&l2cap_sk_list, sk);
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET &&
sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
sock->ops = &l2cap_sock_ops;
sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
l2cap_sock_init(sk, NULL);
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
if (!addr || addr->sa_family != AF_BLUETOOTH)
if (sk->sk_state != BT_OPEN) {
if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
!capable(CAP_NET_BIND_SERVICE)) {
write_lock_bh(&l2cap_sk_list.lock);
if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
/* Save source address */
bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
l2cap_pi(sk)->sport = la->l2_psm;
sk->sk_state = BT_BOUND;
write_unlock_bh(&l2cap_sk_list.lock);
static int l2cap_do_connect(struct sock *sk)
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
if (!(hdev = hci_get_route(dst, src)))
return -EHOSTUNREACH;
hci_dev_lock_bh(hdev);
hcon = hci_connect(hdev, ACL_LINK, dst);
conn = l2cap_conn_add(hcon, 0);
/* Update source addr of the socket */
bacpy(src, conn->src);
l2cap_chan_add(conn, sk, NULL);
sk->sk_state = BT_CONNECT;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
if (hcon->state == BT_CONNECTED) {
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
l2cap_conn_ready(conn);
if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn_req req;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
hci_dev_unlock_bh(hdev);
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
switch (sk->sk_state) {
/* Already connecting */
/* Already connected */
/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
if ((err = l2cap_do_connect(sk)))
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
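/*
 * Hedged userspace sketch (illustrative only, not part of this module) of
 * the connect path implemented above: a SOCK_SEQPACKET L2CAP client fills
 * in a sockaddr_l2 with the remote bdaddr and a PSM (0x1001 is an assumed
 * example value) and calls connect(), which ends up in
 * l2cap_sock_connect()/l2cap_do_connect(); str2ba() and htobs() are the
 * usual BlueZ library helpers.
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 */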
static int l2cap_sock_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
BT_DBG("sk %p backlog %d", sk, backlog);
if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
if (!l2cap_pi(sk)->psm) {
bdaddr_t *src = &bt_sk(sk)->src;
write_lock_bh(&l2cap_sk_list.lock);
for (psm = 0x1001; psm < 0x1100; psm += 2)
if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
l2cap_pi(sk)->psm = htobs(psm);
l2cap_pi(sk)->sport = htobs(psm);
write_unlock_bh(&l2cap_sk_list.lock);
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != BT_LISTEN) {
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != BT_LISTEN) {
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sk_sleep, &wait);
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_l2);
bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
la->l2_psm = l2cap_pi(sk)->psm;
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb, **frag;
int err, hlen, count, sent = 0;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, len);
/* First fragment (with L2CAP header) */
if (sk->sk_type == SOCK_DGRAM)
hlen = L2CAP_HDR_SIZE + 2;
hlen = L2CAP_HDR_SIZE;
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, hlen + count,
msg->msg_flags & MSG_DONTWAIT, &err);
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
if (sk->sk_type == SOCK_DGRAM)
put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);
*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
frag = &(*frag)->next;
if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
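/*
 * Sketch of the wire format assembled above (a descriptive note matching
 * the code, not copied from the original sources): lh->len counts
 * everything after the 4 byte L2CAP header, which is why the SOCK_DGRAM
 * case reserves two extra bytes for the PSM and adds them to the length.
 *
 *	connection-oriented:	[ len | dcid   | payload ... ]
 *	connectionless:		[ len | 0x0002 | psm | payload ... ]
 *
 * with len, cid and psm each 16 bit little endian.
 */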
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (msg->msg_flags & MSG_OOB)
/* Check outgoing MTU */
if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
if (sk->sk_state == BT_CONNECTED)
err = l2cap_do_send(sk, msg, len);
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
BT_DBG("sk %p", sk);
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = L2CAP_MODE_BASIC;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
l2cap_pi(sk)->imtu = opts.imtu;
l2cap_pi(sk)->omtu = opts.omtu;
if (get_user(opt, (u32 __user *) optval)) {
l2cap_pi(sk)->link_mode = opt;
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = L2CAP_MODE_BASIC;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED) {
cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
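/*
 * Userspace view of the option handling above (a hedged sketch, not part of
 * this module): L2CAP_OPTIONS is read, modified and written back at the
 * SOL_L2CAP level, typically before connect() or listen(); 1024 is only an
 * example MTU value.
 *
 *	struct l2cap_options opts;
 *	socklen_t optlen = sizeof(opts);
 *
 *	getsockopt(s, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen);
 *	opts.imtu = 1024;
 *	setsockopt(s, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts));
 */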
static int l2cap_sock_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, 0);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
static int l2cap_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
err = l2cap_sock_shutdown(sock, 2);
l2cap_sock_kill(sk);
static void l2cap_chan_ready(struct sock *sk)
struct sock *parent = bt_sk(sk)->parent;
BT_DBG("sk %p, parent %p", sk, parent);
l2cap_pi(sk)->conf_state = 0;
l2cap_sock_clear_timer(sk);
/* Outgoing channel.
* Wake up socket sleeping on connect.
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
/* Incoming channel.
* Wake up socket sleeping on accept.
parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_chan_list *l = &conn->chan_list;
struct sk_buff *nskb;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_type != SOCK_RAW)
/* Don't send frame to the socket it came from */
if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
if (sock_queue_rcv_skb(sk, nskb))
read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data)
struct sk_buff *skb, **frag;
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_alloc(count, GFP_ATOMIC);
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
lh->cid = cpu_to_le16(0x0001);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->len = cpu_to_le16(dlen);
count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
memcpy(skb_put(skb, count), data, count);
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);
*frag = bt_skb_alloc(count, GFP_ATOMIC);
memcpy(skb_put(*frag, count), data, count);
frag = &(*frag)->next;
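/*
 * Layout of the signalling PDU built above (descriptive sketch, added for
 * clarity): every command travels on CID 0x0001 and is framed as
 *
 *	[ len | 0x0001 ]  [ code | ident | cmd len ]  [ command data ... ]
 *	 L2CAP header      command header
 *
 * Utilities such as l2ping produce the same framing from userspace through
 * a raw L2CAP socket, using an ident from the 200-254 range noted near
 * l2cap_get_ident() above.
 */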
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
struct l2cap_conf_opt *opt = *ptr;
len = L2CAP_CONF_OPT_SIZE + opt->len;
*val = *((u8 *) opt->val);
*val = __le16_to_cpu(*((__le16 *) opt->val));
*val = __le32_to_cpu(*((__le32 *) opt->val));
*val = (unsigned long) opt->val;
BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
struct l2cap_conf_opt *opt = *ptr;
BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
*((u8 *) opt->val) = val;
*((__le16 *) opt->val) = cpu_to_le16(val);
*((__le32 *) opt->val) = cpu_to_le32(val);
memcpy(opt->val, (void *) val, len);
*ptr += L2CAP_CONF_OPT_SIZE + len;
static int l2cap_build_conf_req(struct sock *sk, void *data)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
void *ptr = req->data;
BT_DBG("sk %p", sk);
if (pi->imtu != L2CAP_DEFAULT_MTU)
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
/* FIXME: Need actual value of the flush timeout */
//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
// l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0);
static int l2cap_parse_conf_req(struct sock *sk, void *data)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
void *req = pi->conf_req;
int len = pi->conf_len;
int type, hint, olen;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
BT_DBG("sk %p", sk);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
case L2CAP_CONF_MTU:
case L2CAP_CONF_FLUSH_TO:
case L2CAP_CONF_QOS:
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *) val, olen);
result = L2CAP_CONF_UNKNOWN;
*((u8 *) ptr++) = type;
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
if (rfc.mode == L2CAP_MODE_BASIC) {
result = L2CAP_CONF_UNACCEPT;
pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
result = L2CAP_CONF_UNACCEPT;
memset(&rfc, 0, sizeof(rfc));
rfc.mode = L2CAP_MODE_BASIC;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
rsp->scid = cpu_to_le16(pi->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(0x0000);
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
BT_DBG("sk %p", sk);
rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(flags);
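/*
 * Each configuration option handled by l2cap_get_conf_opt() and
 * l2cap_add_conf_opt() above is a small TLV (sketch added for clarity):
 *
 *	[ type | len | value ]   with type and len one byte each
 *
 * e.g. an MTU option proposing the 672 byte default is encoded as
 * 01 02 a0 02 (the value is little endian).
 */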
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
if (rej->reason != 0x0000)
if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
cmd->ident == conn->info_ident) {
conn->info_ident = 0;
del_timer(&conn->info_timer);
l2cap_conn_start(conn);
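/*
 * Channel establishment as driven by the handlers that follow (illustrative
 * summary, not part of the original file):
 *
 *	initiator                                  acceptor
 *	   L2CAP_CONN_REQ (psm, scid)  -------->   l2cap_connect_req()
 *	   <--------  L2CAP_CONN_RSP (dcid, result, status)
 *	   L2CAP_CONF_REQ  -------------------->   l2cap_config_req()
 *	   <--------------------  L2CAP_CONF_RSP
 *	   <--------------------  L2CAP_CONF_REQ
 *	   L2CAP_CONF_RSP  -------------------->
 *
 * The channel reaches BT_CONNECTED only once both L2CAP_CONF_OUTPUT_DONE
 * and L2CAP_CONF_INPUT_DONE are set, i.e. configuration has completed in
 * both directions.
 */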
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_chan_list *list = &conn->chan_list;
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
struct sock *sk, *parent;
int result = 0, status = 0;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__le16 psm = req->psm;
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
result = L2CAP_CR_BAD_PSM;
result = L2CAP_CR_NO_MEM;
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC);
write_lock_bh(&list->lock);
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(list, scid)) {
write_unlock_bh(&list->lock);
sock_set_flag(sk, SOCK_ZAPPED);
l2cap_sock_kill(sk);
hci_conn_hold(conn->hcon);
l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
l2cap_pi(sk)->psm = psm;
l2cap_pi(sk)->dcid = scid;
__l2cap_chan_add(conn, sk, parent);
dcid = l2cap_pi(sk)->scid;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Service level security */
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
sk->sk_state = BT_CONNECT2;
l2cap_pi(sk)->ident = cmd->ident;
if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
if (!hci_conn_encrypt(conn->hcon))
} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
if (!hci_conn_auth(conn->hcon))
sk->sk_state = BT_CONFIG;
result = status = 0;
write_unlock_bh(&list->lock);
bh_unlock_sock(parent);
rsp.scid = cpu_to_le16(scid);
rsp.dcid = cpu_to_le16(dcid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(status);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
case L2CAP_CR_SUCCESS:
sk->sk_state = BT_CONFIG;
l2cap_pi(sk)->ident = 0;
l2cap_pi(sk)->dcid = dcid;
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
l2cap_chan_del(sk, ECONNREFUSED);
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
if (sk->sk_state == BT_DISCONN)
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp,
L2CAP_CONF_REJECT, flags), rsp);
memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
l2cap_pi(sk)->conf_len += len;
if (flags & 0x0001) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp,
L2CAP_CONF_SUCCESS, 0x0001), rsp);
/* Complete config. */
len = l2cap_parse_conf_req(sk, rsp);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
/* Reset config buffer. */
l2cap_pi(sk)->conf_len = 0;
if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
case L2CAP_CONF_SUCCESS:
case L2CAP_CONF_UNACCEPT:
if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
/* It does not make sense to adjust L2CAP parameters
* that are currently defined in the spec. We simply
* resend config request that we sent earlier. It is
* stupid, but it helps qualification testing which
* expects at least some response from us. */
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
sk->sk_state = BT_DISCONN;
sk->sk_err = ECONNRESET;
l2cap_sock_set_timer(sk, HZ * 5);
struct l2cap_disconn_req req;
req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
struct l2cap_disconn_rsp rsp;
scid = __le16_to_cpu(req->scid);
dcid = __le16_to_cpu(req->dcid);
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_chan_del(sk, ECONNRESET);
l2cap_sock_kill(sk);
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
l2cap_chan_del(sk, 0);
l2cap_sock_kill(sk);
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_info_req *req = (struct l2cap_info_req *) data;
type = __le16_to_cpu(req->type);
BT_DBG("type 0x%4.4x", type);
if (type == L2CAP_IT_FEAT_MASK) {
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
struct l2cap_info_rsp rsp;
rsp.type = cpu_to_le16(type);
rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(rsp), &rsp);
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
type = __le16_to_cpu(rsp->type);
result = __le16_to_cpu(rsp->result);
BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
conn->info_ident = 0;
del_timer(&conn->info_timer);
if (type == L2CAP_IT_FEAT_MASK)
conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));
l2cap_conn_start(conn);
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
u8 *data = skb->data;
struct l2cap_cmd_hdr cmd;
l2cap_raw_recv(conn, skb);
while (len >= L2CAP_CMD_HDR_SIZE) {
memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
data += L2CAP_CMD_HDR_SIZE;
len -= L2CAP_CMD_HDR_SIZE;
cmd_len = le16_to_cpu(cmd.len);
BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
if (cmd_len > len || !cmd.ident) {
BT_DBG("corrupted command");
case L2CAP_COMMAND_REJ:
l2cap_command_rej(conn, &cmd, data);
case L2CAP_CONN_REQ:
err = l2cap_connect_req(conn, &cmd, data);
case L2CAP_CONN_RSP:
err = l2cap_connect_rsp(conn, &cmd, data);
case L2CAP_CONF_REQ:
err = l2cap_config_req(conn, &cmd, cmd_len, data);
case L2CAP_CONF_RSP:
err = l2cap_config_rsp(conn, &cmd, data);
case L2CAP_DISCONN_REQ:
err = l2cap_disconnect_req(conn, &cmd, data);
case L2CAP_DISCONN_RSP:
err = l2cap_disconnect_rsp(conn, &cmd, data);
case L2CAP_ECHO_REQ:
l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
case L2CAP_ECHO_RSP:
case L2CAP_INFO_REQ:
err = l2cap_information_req(conn, &cmd, data);
case L2CAP_INFO_RSP:
err = l2cap_information_rsp(conn, &cmd, data);
BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
struct l2cap_cmd_rej rej;
BT_DBG("error %d", err);
/* FIXME: Map err to a valid reason */
rej.reason = cpu_to_le16(0);
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
BT_DBG("unknown cid 0x%4.4x", cid);
BT_DBG("sk %p, len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
if (l2cap_pi(sk)->imtu < skb->len)
/* If the socket recv buffer overflows we drop data here,
* which is *bad* because L2CAP has to be reliable.
* But we don't have any other choice. L2CAP doesn't
* provide a flow control mechanism. */
if (!sock_queue_rcv_skb(sk, skb))
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
sk = l2cap_get_sock_by_psm(0, psm, conn->src);
BT_DBG("sk %p, len %d", sk, skb->len);
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
if (l2cap_pi(sk)->imtu < skb->len)
if (!sock_queue_rcv_skb(sk, skb))
if (sk) bh_unlock_sock(sk);
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_hdr *lh = (void *) skb->data;
skb_pull(skb, L2CAP_HDR_SIZE);
cid = __le16_to_cpu(lh->cid);
len = __le16_to_cpu(lh->len);
BT_DBG("len %d, cid 0x%4.4x", len, cid);
l2cap_sig_channel(conn, skb);
psm = get_unaligned((__le16 *) skb->data);
l2cap_conless_channel(conn, psm, skb);
l2cap_data_channel(conn, cid, skb);
/* ---- L2CAP interface with lower layer (HCI) ---- */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
int exact = 0, lm1 = 0, lm2 = 0;
register struct sock *sk;
struct hlist_node *node;
if (type != ACL_LINK)
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets and check their link_mode */
read_lock(&l2cap_sk_list.lock);
sk_for_each(sk, node, &l2cap_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
read_unlock(&l2cap_sk_list.lock);
return exact ? lm1 : lm2;
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_conn *conn;
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
if (hcon->type != ACL_LINK)
conn = l2cap_conn_add(hcon, status);
l2cap_conn_ready(conn);
l2cap_conn_del(hcon, bt_err(status));
static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
BT_DBG("hcon %p reason %d", hcon, reason);
if (hcon->type != ACL_LINK)
l2cap_conn_del(hcon, bt_err(reason));
static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_chan_list *l;
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_conn_rsp rsp;
l = &conn->chan_list;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_state != BT_CONNECT2 ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
sk->sk_state = BT_CONFIG;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(0);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
read_unlock(&l->lock);
static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_chan_list *l;
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_conn_rsp rsp;
l = &conn->chan_list;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_state != BT_CONNECT2) {
sk->sk_state = BT_CONFIG;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(0);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
hci_conn_change_link_key(hcon);
read_unlock(&l->lock);
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
struct l2cap_conn *conn = hcon->l2cap_data;
if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
if (flags & ACL_START) {
struct l2cap_hdr *hdr;
BT_ERR("Unexpected start frame (len %d)", skb->len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
BT_ERR("Frame is too short (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
if (len == skb->len) {
/* Complete frame received */
l2cap_recv_frame(conn, skb);
BT_DBG("Start: total len %d, frag len %d", len, skb->len);
if (skb->len > len) {
BT_ERR("Frame is too long (len %d, expected len %d)",
l2cap_conn_unreliable(conn, ECOMM);
/* Allocate skb for the complete frame (with header) */
if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
conn->rx_len = len - skb->len;
BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
if (!conn->rx_len) {
BT_ERR("Unexpected continuation frame (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
if (skb->len > conn->rx_len) {
BT_ERR("Fragment is too long (len %d, expected %d)",
skb->len, conn->rx_len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
conn->rx_len -= skb->len;
if (!conn->rx_len) {
/* Complete frame received */
l2cap_recv_frame(conn, conn->rx_skb);
conn->rx_skb = NULL;
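/*
 * Worked example for the reassembly above (numbers chosen for
 * illustration): a 300 byte L2CAP frame (hdr->len = 296) carried over a
 * 192 byte ACL MTU arrives as an ACL_START fragment of 192 bytes followed
 * by a continuation fragment of 108 bytes; the first allocates rx_skb for
 * the complete frame and sets rx_len = 108, the second copies the remainder
 * and hands the finished frame to l2cap_recv_frame().
 */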
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
struct hlist_node *node;
read_lock_bh(&l2cap_sk_list.lock);
sk_for_each(sk, node, &l2cap_sk_list.head) {
struct l2cap_pinfo *pi = l2cap_pi(sk);
str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
pi->imtu, pi->omtu, pi->link_mode);
read_unlock_bh(&l2cap_sk_list.lock);
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
static const struct proto_ops l2cap_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = l2cap_sock_release,
.bind = l2cap_sock_bind,
.connect = l2cap_sock_connect,
.listen = l2cap_sock_listen,
.accept = l2cap_sock_accept,
.getname = l2cap_sock_getname,
.sendmsg = l2cap_sock_sendmsg,
.recvmsg = bt_sock_recvmsg,
.poll = bt_sock_poll,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.ioctl = sock_no_ioctl,
.shutdown = l2cap_sock_shutdown,
.setsockopt = l2cap_sock_setsockopt,
.getsockopt = l2cap_sock_getsockopt
static struct net_proto_family l2cap_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = l2cap_sock_create,
static struct hci_proto l2cap_hci_proto = {
.id = HCI_PROTO_L2CAP,
.connect_ind = l2cap_connect_ind,
.connect_cfm = l2cap_connect_cfm,
.disconn_ind = l2cap_disconn_ind,
.auth_cfm = l2cap_auth_cfm,
.encrypt_cfm = l2cap_encrypt_cfm,
.recv_acldata = l2cap_recv_acldata
static int __init l2cap_init(void)
err = proto_register(&l2cap_proto, 0);
err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
BT_ERR("L2CAP socket registration failed");
err = hci_register_proto(&l2cap_hci_proto);
BT_ERR("L2CAP protocol registration failed");
bt_sock_unregister(BTPROTO_L2CAP);
if (class_create_file(bt_class, &class_attr_l2cap) < 0)
BT_ERR("Failed to create L2CAP info file");
BT_INFO("L2CAP ver %s", VERSION);
BT_INFO("L2CAP socket layer initialized");
proto_unregister(&l2cap_proto);
static void __exit l2cap_exit(void)
class_remove_file(bt_class, &class_attr_l2cap);
if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
BT_ERR("L2CAP socket unregistration failed");
if (hci_unregister_proto(&l2cap_hci_proto) < 0)
BT_ERR("L2CAP protocol unregistration failed");
proto_unregister(&l2cap_proto);
void l2cap_load(void)
/* Dummy function to trigger automatic L2CAP module loading by
* other modules that use L2CAP sockets but don't use any other
* symbols from it. */
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");