2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/config.h>
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
44 #include <linux/list.h>
47 #include <asm/system.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 #include <net/bluetooth/l2cap.h>
/* NOTE(review): the matching #define/#endif for this conditional is not
 * visible in this extract — verify against the full file. */
55 #ifndef CONFIG_BT_L2CAP_DEBUG
/* Socket-operations table for L2CAP sockets; initialized later in the file. */
62 static struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
/* Forward declarations for routines referenced before their definitions. */
68 static int l2cap_conn_del(struct hci_conn *conn, int err);
70 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
71 static void l2cap_chan_del(struct sock *sk, int err);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* Per-socket timer callback: fires when sk_timer expires and closes the
 * socket with ETIMEDOUT.  NOTE(review): extraction dropped interior lines
 * (locking/braces) — confirm against the full source. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
85 BT_DBG("sock %p state %d", sk, sk->sk_state);
88 __l2cap_sock_close(sk, ETIMEDOUT);
/* Arm the socket timer to fire `timeout` jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
/* Initialize sk_timer so that l2cap_sock_timeout() runs with the socket
 * pointer as its argument when the timer expires. */
107 static void l2cap_sock_init_timer(struct sock *sk)
109 init_timer(&sk->sk_timer);
110 sk->sk_timer.function = l2cap_sock_timeout;
111 sk->sk_timer.data = (unsigned long)sk;
114 /* ---- L2CAP connections ---- */
/* Create (or reuse) the L2CAP connection object attached to an HCI ACL
 * link.  Returns the existing hcon->l2cap_data if one is already there.
 * Allocation is GFP_ATOMIC because this can run in softirq context. */
115 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
117 struct l2cap_conn *conn;
/* Already have an L2CAP layer on this link — reuse it. */
119 if ((conn = hcon->l2cap_data))
125 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
127 memset(conn, 0, sizeof(struct l2cap_conn));
129 hcon->l2cap_data = conn;
/* Outgoing MTU is bounded by the controller's ACL buffer size. */
132 conn->mtu = hcon->hdev->acl_mtu;
133 conn->src = &hcon->hdev->bdaddr;
134 conn->dst = &hcon->dst;
136 spin_lock_init(&conn->lock);
137 rwlock_init(&conn->chan_list.lock);
139 BT_DBG("hcon %p conn %p", hcon, conn);
/* Tear down the L2CAP connection on an HCI link: free any partial
 * reassembly skb, delete every channel with `err`, and detach from hcon.
 * NOTE(review): per-channel locking lines appear to be missing from this
 * extract — verify against the full source. */
143 static int l2cap_conn_del(struct hci_conn *hcon, int err)
145 struct l2cap_conn *conn;
148 if (!(conn = hcon->l2cap_data))
151 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled incoming frame. */
154 kfree_skb(conn->rx_skb);
/* Kill all channels still attached to this connection. */
157 while ((sk = conn->chan_list.head)) {
159 l2cap_chan_del(sk, err);
164 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(): attach channel `sk` (with
 * optional listening `parent`) under the chan_list write lock. */
169 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
171 struct l2cap_chan_list *l = &conn->chan_list;
172 write_lock(&l->lock);
173 __l2cap_chan_add(conn, sk, parent);
174 write_unlock(&l->lock);
/* Allocate the next signalling-command identifier for this connection,
 * serialized by conn->lock. */
177 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
181 /* Get next available identificator.
182 * 1 - 128 are used by kernel.
183 * 129 - 199 are reserved.
184 * 200 - 254 are used by utilities like l2ping, etc.
187 spin_lock(&conn->lock);
/* Wrap back into the kernel range once we pass 128. */
189 if (++conn->tx_ident > 128)
194 spin_unlock(&conn->lock);
/* Build an L2CAP signalling command skb and push it down the ACL link.
 * Returns the hci_send_acl() result. */
199 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
201 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
203 BT_DBG("code 0x%2.2x", code);
208 return hci_send_acl(conn->hcon, skb, 0);
211 /* ---- Socket interface ---- */
/* Find a socket bound to exactly this (psm, source bdaddr) pair.
 * Caller must hold l2cap_sk_list.lock. */
212 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
215 struct hlist_node *node;
216 sk_for_each(sk, node, &l2cap_sk_list.head)
217 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
224 /* Find socket with psm and source bdaddr.
225 * Returns closest match: an exact source-address match is preferred,
 * otherwise a BDADDR_ANY wildcard match (kept in sk1) is returned.
 * Caller must hold l2cap_sk_list.lock.
227 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
229 struct sock *sk = NULL, *sk1 = NULL;
230 struct hlist_node *node;
232 sk_for_each(sk, node, &l2cap_sk_list.head) {
/* Optional state filter (e.g. BT_LISTEN); 0 matches any state. */
233 if (state && sk->sk_state != state)
236 if (l2cap_pi(sk)->psm == psm) {
/* Exact source-address match wins immediately. */
238 if (!bacmp(&bt_sk(sk)->src, src))
/* Remember a wildcard-bound candidate as fallback. */
242 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
246 return node ? sk : sk1;
249 /* Find socket with given address (psm, src).
250 * Returns locked socket: caller must bh_unlock_sock() the result. */
251 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
254 read_lock(&l2cap_sk_list.lock);
255 s = __l2cap_get_sock_by_psm(state, psm, src);
/* Lock the socket before dropping the list lock so it cannot vanish. */
256 if (s) bh_lock_sock(s);
257 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: release any skbs still queued on the socket. */
261 static void l2cap_sock_destruct(struct sock *sk)
265 skb_queue_purge(&sk->sk_receive_queue);
266 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark the
 * parent closed and zapped. */
269 static void l2cap_sock_cleanup_listen(struct sock *parent)
273 BT_DBG("parent %p", parent);
275 /* Close not yet accepted channels */
276 while ((sk = bt_accept_dequeue(parent, NULL)))
277 l2cap_sock_close(sk);
279 parent->sk_state = BT_CLOSED;
280 sock_set_flag(parent, SOCK_ZAPPED);
283 /* Kill socket (only if zapped and orphan)
284 * Must be called on unlocked socket.
286 static void l2cap_sock_kill(struct sock *sk)
/* Bail out unless the socket is both zapped and detached from userspace. */
288 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
291 BT_DBG("sk %p state %d", sk, sk->sk_state);
293 /* Kill poor orphan */
294 bt_sock_unlink(&l2cap_sk_list, sk);
295 sock_set_flag(sk, SOCK_DEAD);
/* State-driven close of an L2CAP socket.  Listening sockets shed their
 * accept queue; connected SEQPACKET channels send a Disconnect Request
 * and wait in BT_DISCONN; other states drop the channel directly.
 * NOTE(review): the case labels and several branch lines were lost in
 * extraction — verify the switch structure against the full source. */
299 static void __l2cap_sock_close(struct sock *sk, int reason)
301 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
303 switch (sk->sk_state) {
305 l2cap_sock_cleanup_listen(sk);
/* Connection-oriented channel: do the L2CAP disconnect handshake. */
311 if (sk->sk_type == SOCK_SEQPACKET) {
312 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
313 struct l2cap_disconn_req req;
315 sk->sk_state = BT_DISCONN;
316 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
318 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
319 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
320 l2cap_send_cmd(conn, l2cap_get_ident(conn),
321 L2CAP_DISCONN_REQ, sizeof(req), &req);
/* Non-SEQPACKET (or other states): remove the channel immediately. */
323 l2cap_chan_del(sk, reason);
329 l2cap_chan_del(sk, reason);
333 sock_set_flag(sk, SOCK_ZAPPED);
338 /* Must be called on unlocked socket: stops the timer, then closes with
 * ECONNRESET. */
339 static void l2cap_sock_close(struct sock *sk)
341 l2cap_sock_clear_timer(sk);
343 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize L2CAP-specific socket state.  With a parent (incoming
 * connection) the child inherits type, MTUs and link mode; otherwise
 * defaults are applied. */
348 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
350 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Inherit settings from the listening parent. */
355 sk->sk_type = parent->sk_type;
356 pi->imtu = l2cap_pi(parent)->imtu;
357 pi->omtu = l2cap_pi(parent)->omtu;
358 pi->link_mode = l2cap_pi(parent)->link_mode;
/* No parent: start from the protocol default incoming MTU. */
360 pi->imtu = L2CAP_DEFAULT_MTU;
365 /* Default config options */
366 pi->conf_mtu = L2CAP_DEFAULT_MTU;
367 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: tells the core socket layer how much per-socket
 * private data (struct l2cap_pinfo) to allocate. */
370 static struct proto l2cap_proto = {
372 .owner = THIS_MODULE,
373 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize a new L2CAP socket (state BT_OPEN),
 * then link it into the global socket list.  `prio` is the GFP mask. */
376 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
380 sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);
384 sock_init_data(sock, sk);
385 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
387 sk->sk_destruct = l2cap_sock_destruct;
/* Default connect timeout used by the socket timer. */
388 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
390 sock_reset_flag(sk, SOCK_ZAPPED);
392 sk->sk_protocol = proto;
393 sk->sk_state = BT_OPEN;
395 l2cap_sock_init_timer(sk);
397 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) handler for PF_BLUETOOTH/BTPROTO_L2CAP.  Accepts SEQPACKET,
 * DGRAM and RAW types; RAW additionally needs CAP_NET_RAW. */
401 static int l2cap_sock_create(struct socket *sock, int protocol)
405 BT_DBG("sock %p", sock);
407 sock->state = SS_UNCONNECTED;
409 if (sock->type != SOCK_SEQPACKET &&
410 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
411 return -ESOCKTNOSUPPORT;
/* Raw sockets see signalling traffic — require privilege. */
413 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
416 sock->ops = &l2cap_sock_ops;
418 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
422 l2cap_sock_init(sk, NULL);
/* bind(2): record source bdaddr and PSM.  Rejects a PSM already bound to
 * the same source address; only valid in state BT_OPEN. */
426 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
428 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
429 struct sock *sk = sock->sk;
432 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
434 if (!addr || addr->sa_family != AF_BLUETOOTH)
439 if (sk->sk_state != BT_OPEN) {
444 write_lock_bh(&l2cap_sk_list.lock);
/* PSM already taken on this source address — refuse the bind. */
446 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
449 /* Save source address */
450 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
451 l2cap_pi(sk)->psm = la->l2_psm;
452 l2cap_pi(sk)->sport = la->l2_psm;
453 sk->sk_state = BT_BOUND;
456 write_unlock_bh(&l2cap_sk_list.lock);
/* Start an outgoing connection: pick an HCI route, create/reuse the ACL
 * link and L2CAP connection, attach the channel, and — if the link is
 * already up — either send a Connect Request (SEQPACKET) or go straight
 * to BT_CONNECTED (other types).  NOTE(review): error-path lines are
 * missing from this extract — verify against the full source. */
463 static int l2cap_do_connect(struct sock *sk)
465 bdaddr_t *src = &bt_sk(sk)->src;
466 bdaddr_t *dst = &bt_sk(sk)->dst;
467 struct l2cap_conn *conn;
468 struct hci_conn *hcon;
469 struct hci_dev *hdev;
472 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
/* Find a local adapter that can reach dst. */
474 if (!(hdev = hci_get_route(dst, src)))
475 return -EHOSTUNREACH;
477 hci_dev_lock_bh(hdev);
481 hcon = hci_connect(hdev, ACL_LINK, dst);
485 conn = l2cap_conn_add(hcon, 0);
493 /* Update source addr of the socket */
494 bacpy(src, conn->src);
496 l2cap_chan_add(conn, sk, NULL);
498 sk->sk_state = BT_CONNECT;
499 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL already up: proceed with the L2CAP-level handshake now. */
501 if (hcon->state == BT_CONNECTED) {
502 if (sk->sk_type == SOCK_SEQPACKET) {
503 struct l2cap_conn_req req;
504 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
505 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
506 req.psm = l2cap_pi(sk)->psm;
507 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
508 L2CAP_CONN_REQ, sizeof(req), &req);
/* Connectionless/raw sockets need no handshake. */
510 l2cap_sock_clear_timer(sk);
511 sk->sk_state = BT_CONNECTED;
516 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address, store destination bdaddr/PSM, kick
 * off l2cap_do_connect(), then wait for BT_CONNECTED (honoring
 * O_NONBLOCK via the send timeout). */
521 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
523 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
524 struct sock *sk = sock->sk;
531 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
/* Connection-oriented sockets must name a PSM. */
536 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
541 switch(sk->sk_state) {
545 /* Already connecting */
549 /* Already connected */
562 /* Set destination address and psm */
563 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
564 l2cap_pi(sk)->psm = la->l2_psm;
566 if ((err = l2cap_do_connect(sk)))
570 err = bt_sock_wait_state(sk, BT_CONNECTED,
571 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only for bound SEQPACKET sockets.  If no PSM was bound,
 * auto-allocate an odd dynamic PSM in 0x1001..0x10ff. */
577 static int l2cap_sock_listen(struct socket *sock, int backlog)
579 struct sock *sk = sock->sk;
582 BT_DBG("sk %p backlog %d", sk, backlog);
586 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
591 if (!l2cap_pi(sk)->psm) {
592 bdaddr_t *src = &bt_sk(sk)->src;
597 write_lock_bh(&l2cap_sk_list.lock);
/* Valid PSMs are odd — step by 2 through the dynamic range. */
599 for (psm = 0x1001; psm < 0x1100; psm += 2)
600 if (!__l2cap_get_sock_by_addr(psm, src)) {
601 l2cap_pi(sk)->psm = htobs(psm);
602 l2cap_pi(sk)->sport = htobs(psm);
607 write_unlock_bh(&l2cap_sk_list.lock);
613 sk->sk_max_ack_backlog = backlog;
614 sk->sk_ack_backlog = 0;
615 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (interruptibly, wake-one) until a connected child can
 * be dequeued from the listening socket's accept queue. */
622 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
624 DECLARE_WAITQUEUE(wait, current);
625 struct sock *sk = sock->sk, *nsk;
631 if (sk->sk_state != BT_LISTEN) {
636 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
638 BT_DBG("sk %p timeo %ld", sk, timeo);
640 /* Wait for an incoming connection. (wake-one). */
641 add_wait_queue_exclusive(sk->sk_sleep, &wait);
642 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
643 set_current_state(TASK_INTERRUPTIBLE);
650 timeo = schedule_timeout(timeo);
/* Socket may have been closed while we slept. */
653 if (sk->sk_state != BT_LISTEN) {
658 if (signal_pending(current)) {
659 err = sock_intr_errno(timeo);
663 set_current_state(TASK_RUNNING);
664 remove_wait_queue(sk->sk_sleep, &wait);
669 newsock->state = SS_CONNECTED;
671 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill in bdaddr (peer dst or local src,
 * depending on `peer`) and the channel PSM. */
678 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
680 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
681 struct sock *sk = sock->sk;
683 BT_DBG("sock %p, sk %p", sock, sk);
685 addr->sa_family = AF_BLUETOOTH;
686 *len = sizeof(struct sockaddr_l2);
689 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
691 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
693 la->l2_psm = l2cap_pi(sk)->psm;
/* Build and transmit one outgoing L2CAP frame from a msghdr.  The first
 * skb carries the basic header (plus a 2-byte PSM for connectionless
 * DGRAM sockets); data beyond conn->mtu is chained as frag_list
 * continuation skbs with no header.  NOTE(review): loop/error-path lines
 * are missing from this extract — verify against the full source. */
697 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
699 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
700 struct sk_buff *skb, **frag;
701 int err, hlen, count, sent=0;
702 struct l2cap_hdr *lh;
704 BT_DBG("sk %p len %d", sk, len);
706 /* First fragment (with L2CAP header) */
707 if (sk->sk_type == SOCK_DGRAM)
708 hlen = L2CAP_HDR_SIZE + 2;
710 hlen = L2CAP_HDR_SIZE;
/* First fragment payload is capped by the link MTU minus the header. */
712 count = min_t(unsigned int, (conn->mtu - hlen), len);
714 skb = bt_skb_send_alloc(sk, hlen + count,
715 msg->msg_flags & MSG_DONTWAIT, &err);
719 /* Create L2CAP header */
720 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
721 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
722 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Connectionless frames carry the PSM right after the header. */
724 if (sk->sk_type == SOCK_DGRAM)
725 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
727 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
735 /* Continuation fragments (no L2CAP header) */
736 frag = &skb_shinfo(skb)->frag_list;
738 count = min_t(unsigned int, conn->mtu, len);
740 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
744 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
752 frag = &(*frag)->next;
755 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject OOB and oversized payloads, then hand off to
 * l2cap_do_send() if the socket is connected. */
765 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
767 struct sock *sk = sock->sk;
770 BT_DBG("sock %p, sk %p", sock, sk);
773 return sock_error(sk);
/* L2CAP has no out-of-band data. */
775 if (msg->msg_flags & MSG_OOB)
778 /* Check outgoing MTU */
779 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
784 if (sk->sk_state == BT_CONNECTED)
785 err = l2cap_do_send(sk, msg, len);
/* setsockopt(2) at SOL_L2CAP level: L2CAP_OPTIONS sets the MTUs,
 * L2CAP_LM sets the link-mode flags.  NOTE(review): the case labels for
 * the option switch were dropped in extraction — verify against the
 * full source. */
793 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
795 struct sock *sk = sock->sk;
796 struct l2cap_options opts;
/* Copy no more than the size of the options struct. */
806 len = min_t(unsigned int, sizeof(opts), optlen);
807 if (copy_from_user((char *) &opts, optval, len)) {
811 l2cap_pi(sk)->imtu = opts.imtu;
812 l2cap_pi(sk)->omtu = opts.omtu;
816 if (get_user(opt, (u32 __user *) optval)) {
821 l2cap_pi(sk)->link_mode = opt;
/* getsockopt(2): returns L2CAP_OPTIONS (MTUs, flush timeout), L2CAP_LM
 * (link mode) or L2CAP_CONNINFO (HCI handle + device class; connected
 * sockets only).  NOTE(review): option-case labels were dropped in
 * extraction — verify against the full source. */
833 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
835 struct sock *sk = sock->sk;
836 struct l2cap_options opts;
837 struct l2cap_conninfo cinfo;
842 if (get_user(len, optlen))
849 opts.imtu = l2cap_pi(sk)->imtu;
850 opts.omtu = l2cap_pi(sk)->omtu;
851 opts.flush_to = l2cap_pi(sk)->flush_to;
/* Never copy more than the caller asked for or the struct holds. */
854 len = min_t(unsigned int, len, sizeof(opts));
855 if (copy_to_user(optval, (char *) &opts, len))
861 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
/* CONNINFO is only meaningful once the channel is connected. */
866 if (sk->sk_state != BT_CONNECTED) {
871 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
872 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
874 len = min_t(unsigned int, len, sizeof(cinfo));
875 if (copy_to_user(optval, (char *) &cinfo, len))
/* shutdown(2): close the channel once, then honor SO_LINGER by waiting
 * for BT_CLOSED up to the linger time. */
889 static int l2cap_sock_shutdown(struct socket *sock, int how)
891 struct sock *sk = sock->sk;
894 BT_DBG("sock %p, sk %p", sock, sk);
/* Only act on the first shutdown call. */
900 if (!sk->sk_shutdown) {
901 sk->sk_shutdown = SHUTDOWN_MASK;
902 l2cap_sock_clear_timer(sk);
903 __l2cap_sock_close(sk, 0);
905 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
906 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
/* close(2)/release: full shutdown of both directions, then the socket is
 * killed once it becomes a zapped orphan. */
912 static int l2cap_sock_release(struct socket *sock)
914 struct sock *sk = sock->sk;
917 BT_DBG("sock %p, sk %p", sock, sk);
922 err = l2cap_sock_shutdown(sock, 2);
929 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for a channel whose destination CID
 * matches.  Caller must hold the chan_list lock. */
930 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
933 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
934 if (l2cap_pi(s)->dcid == cid)
/* Walk the connection's channel list for a channel whose source CID
 * matches.  Caller must hold the chan_list lock. */
940 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
943 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
944 if (l2cap_pi(s)->scid == cid)
950 /* Find channel with given SCID.
951 * Returns locked socket: caller must bh_unlock_sock() the result. */
952 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
956 s = __l2cap_get_chan_by_scid(l, cid);
/* Lock before releasing the list lock so the socket cannot go away. */
957 if (s) bh_lock_sock(s);
958 read_unlock(&l->lock);
/* Find the channel waiting on the signalling command with this ident.
 * Caller must hold the chan_list lock. */
962 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
965 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
966 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(); returns the socket
 * bh-locked, caller must unlock. */
972 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
976 s = __l2cap_get_chan_by_ident(l, ident);
977 if (s) bh_lock_sock(s);
978 read_unlock(&l->lock);
/* Pick the first source CID not already used by a channel on this
 * connection.  NOTE(review): the starting value of `cid` was dropped in
 * extraction — verify against the full source. */
982 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
986 for (; cid < 0xffff; cid++) {
987 if(!__l2cap_get_chan_by_scid(l, cid))
/* Push `sk` onto the front of the doubly linked channel list.
 * Caller must hold the chan_list write lock. */
994 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
999 l2cap_pi(l->head)->prev_c = sk;
1001 l2cap_pi(sk)->next_c = l->head;
1002 l2cap_pi(sk)->prev_c = NULL;
/* Remove `sk` from the connection's channel list under the write lock,
 * splicing its neighbours together. */
1006 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
1008 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
1010 write_lock(&l->lock);
1015 l2cap_pi(next)->prev_c = prev;
1017 l2cap_pi(prev)->next_c = next;
1018 write_unlock(&l->lock);
/* Attach channel `sk` to connection `conn`: assign CIDs by socket type
 * (dynamic for SEQPACKET, fixed 0x0002 for connectionless DGRAM, fixed
 * 0x0001 signalling CID for RAW), link it into the channel list, and
 * queue it on `parent`'s accept queue when given.
 * Caller must hold the chan_list write lock. */
1023 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
1025 struct l2cap_chan_list *l = &conn->chan_list;
1027 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
1029 l2cap_pi(sk)->conn = conn;
1031 if (sk->sk_type == SOCK_SEQPACKET) {
1032 /* Alloc CID for connection-oriented socket */
1033 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
1034 } else if (sk->sk_type == SOCK_DGRAM) {
1035 /* Connectionless socket */
1036 l2cap_pi(sk)->scid = 0x0002;
1037 l2cap_pi(sk)->dcid = 0x0002;
1038 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1040 /* Raw socket can send/recv signalling messages only */
1041 l2cap_pi(sk)->scid = 0x0001;
1042 l2cap_pi(sk)->dcid = 0x0001;
1043 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1046 __l2cap_chan_link(l, sk);
1049 bt_accept_enqueue(parent, sk);
/* Detach a channel from its connection and mark it closed/zapped.
1053 * Must be called on the locked socket. */
1054 static void l2cap_chan_del(struct sock *sk, int err)
1056 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1057 struct sock *parent = bt_sk(sk)->parent;
1059 l2cap_sock_clear_timer(sk);
1061 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
1064 /* Unlink from channel list */
1065 l2cap_chan_unlink(&conn->chan_list, sk);
1066 l2cap_pi(sk)->conn = NULL;
/* Drop the reference the channel held on the ACL link. */
1067 hci_conn_put(conn->hcon);
1070 sk->sk_state = BT_CLOSED;
1071 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending child: remove from accept queue and wake the listener. */
1077 bt_accept_unlink(sk);
1078 parent->sk_data_ready(parent, 0);
/* Wake anyone sleeping on this socket's state. */
1080 sk->sk_state_change(sk);
/* ACL link just came up: for every channel on the connection, either
 * mark non-SEQPACKET sockets connected immediately, or send the pending
 * Connect Request for SEQPACKET channels in BT_CONNECT. */
1083 static void l2cap_conn_ready(struct l2cap_conn *conn)
1085 struct l2cap_chan_list *l = &conn->chan_list;
1088 BT_DBG("conn %p", conn);
1090 read_lock(&l->lock);
1092 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1095 if (sk->sk_type != SOCK_SEQPACKET) {
1096 l2cap_sock_clear_timer(sk);
1097 sk->sk_state = BT_CONNECTED;
1098 sk->sk_state_change(sk);
1099 } else if (sk->sk_state == BT_CONNECT) {
1100 struct l2cap_conn_req req;
1101 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1102 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1103 req.psm = l2cap_pi(sk)->psm;
1104 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1110 read_unlock(&l->lock);
1113 /* Notify sockets that we cannot guaranty reliability anymore: channels
 * that requested L2CAP_LM_RELIABLE get the error propagated. */
1114 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1116 struct l2cap_chan_list *l = &conn->chan_list;
1119 BT_DBG("conn %p", conn);
1121 read_lock(&l->lock);
1122 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1123 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1126 read_unlock(&l->lock);
/* Configuration finished on a channel: clear config state and the timer,
 * then wake whoever is waiting — the connecting thread for outgoing
 * channels, the listening parent for incoming ones. */
1129 static void l2cap_chan_ready(struct sock *sk)
1131 struct sock *parent = bt_sk(sk)->parent;
1133 BT_DBG("sk %p, parent %p", sk, parent);
1135 l2cap_pi(sk)->conf_state = 0;
1136 l2cap_sock_clear_timer(sk);
1139 /* Outgoing channel.
1140 * Wake up socket sleeping on connect.
1142 sk->sk_state = BT_CONNECTED;
1143 sk->sk_state_change(sk);
1145 /* Incoming channel.
1146 * Wake up socket sleeping on accept.
1148 parent->sk_data_ready(parent, 0);
1152 /* Copy frame to all raw sockets on that connection: each RAW socket gets
 * its own clone queued on its receive queue. */
1153 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1155 struct l2cap_chan_list *l = &conn->chan_list;
1156 struct sk_buff *nskb;
1159 BT_DBG("conn %p", conn);
1161 read_lock(&l->lock);
1162 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1163 if (sk->sk_type != SOCK_RAW)
1166 /* Don't send frame to the socket it came from */
/* Clone per recipient; GFP_ATOMIC since we may be in softirq context. */
1170 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
/* Queue failed (e.g. rcvbuf full) — drop the clone. */
1173 if (sock_queue_rcv_skb(sk, nskb))
1176 read_unlock(&l->lock);
1179 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb for a signalling command: basic L2CAP header
 * on CID 0x0001, command header, then `dlen` bytes of payload, chained
 * into continuation fragments when it exceeds the link MTU.
 * NOTE(review): loop/error-path lines are missing from this extract —
 * verify against the full source. */
1180 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1181 u8 code, u8 ident, u16 dlen, void *data)
1183 struct sk_buff *skb, **frag;
1184 struct l2cap_cmd_hdr *cmd;
1185 struct l2cap_hdr *lh;
1188 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1190 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1191 count = min_t(unsigned int, conn->mtu, len);
1193 skb = bt_skb_alloc(count, GFP_ATOMIC);
/* Basic L2CAP header on the signalling channel (CID 0x0001). */
1197 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1198 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1199 lh->cid = __cpu_to_le16(0x0001);
1201 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1204 cmd->len = __cpu_to_le16(dlen);
/* Payload room left in the first fragment after both headers. */
1207 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1208 memcpy(skb_put(skb, count), data, count);
1214 /* Continuation fragments (no L2CAP header) */
1215 frag = &skb_shinfo(skb)->frag_list;
1217 count = min_t(unsigned int, conn->mtu, len);
1219 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1223 memcpy(skb_put(*frag, count), data, count);
1228 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its type/length and
 * the little-endian-converted value (1/2/4-byte scalars, or a pointer to
 * the raw bytes for larger options).  NOTE(review): the switch-on-length
 * case labels were dropped in extraction — verify against the full
 * source. */
1238 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1240 struct l2cap_conf_opt *opt = *ptr;
1243 len = L2CAP_CONF_OPT_SIZE + opt->len;
1251 *val = *((u8 *) opt->val);
1255 *val = __le16_to_cpu(*((u16 *)opt->val));
1259 *val = __le32_to_cpu(*((u32 *)opt->val));
/* Larger options: hand back a pointer to the raw value bytes. */
1263 *val = (unsigned long) opt->val;
1267 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Walk the option list of a Configure Request, recording the peer's
 * requested MTU and flush timeout on the socket.  QoS and unknown
 * options are currently not acted on (see FIXME). */
1271 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1273 int type, hint, olen;
1277 BT_DBG("sk %p len %d", sk, len);
1279 while (len >= L2CAP_CONF_OPT_SIZE) {
1280 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1286 case L2CAP_CONF_MTU:
1287 l2cap_pi(sk)->conf_mtu = val;
1290 case L2CAP_CONF_FLUSH_TO:
1291 l2cap_pi(sk)->flush_to = val;
1294 case L2CAP_CONF_QOS:
1301 /* FIXME: Reject unknown option */
/* Append one configuration option (type/len/value) at *ptr and advance
 * the pointer past it.  Scalar values are stored little-endian; longer
 * values are memcpy'd from the pointer passed in `val`.  NOTE(review):
 * the switch-on-length case labels were dropped in extraction — verify
 * against the full source. */
1307 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1309 struct l2cap_conf_opt *opt = *ptr;
1311 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1318 *((u8 *) opt->val) = val;
1322 *((u16 *) opt->val) = __cpu_to_le16(val);
1326 *((u32 *) opt->val) = __cpu_to_le32(val);
1330 memcpy(opt->val, (void *) val, len);
1334 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Fill `data` with our Configure Request: the MTU option is included
 * only when it differs from the default. */
1337 static int l2cap_build_conf_req(struct sock *sk, void *data)
1339 struct l2cap_pinfo *pi = l2cap_pi(sk);
1340 struct l2cap_conf_req *req = data;
1341 void *ptr = req->data;
1343 BT_DBG("sk %p", sk);
1345 if (pi->imtu != L2CAP_DEFAULT_MTU)
1346 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1348 /* FIXME: Need actual value of the flush timeout */
1349 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1350 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1352 req->dcid = __cpu_to_le16(pi->dcid);
/* No continuation: single-packet request. */
1353 req->flags = __cpu_to_le16(0);
/* Evaluate the peer's requested options against ours: a requested MTU
 * smaller than our outgoing MTU is rejected (we echo ours back with
 * UNACCEPT); otherwise we adopt it as the new omtu. */
1358 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1360 struct l2cap_pinfo *pi = l2cap_pi(sk);
1363 /* Configure output options and let the other side know
1364 * which ones we don't like. */
1365 if (pi->conf_mtu < pi->omtu) {
1366 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1367 result = L2CAP_CONF_UNACCEPT;
1369 pi->omtu = pi->conf_mtu;
1372 BT_DBG("sk %p result %d", sk, result);
/* Build a Configure Response.  When `result` is non-NULL the config is
 * complete: evaluate options via l2cap_conf_output() and report the
 * outcome; NULL means an empty response for an incomplete request. */
1376 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1378 struct l2cap_conf_rsp *rsp = data;
1379 void *ptr = rsp->data;
1382 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1385 *result = l2cap_conf_output(sk, &ptr);
1389 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1390 rsp->result = __cpu_to_le16(result ? *result : 0);
1391 rsp->flags = __cpu_to_le16(flags);
/* Handle an incoming L2CAP Connect Request: locate a listener for the
 * PSM, allocate a child socket, attach it to the connection, and answer
 * with success, pending (security in progress), or an error result.
 * NOTE(review): several error/goto lines were dropped in extraction —
 * verify the control flow against the full source. */
1396 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1398 struct l2cap_chan_list *list = &conn->chan_list;
1399 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1400 struct l2cap_conn_rsp rsp;
1401 struct sock *sk, *parent;
1402 int result = 0, status = 0;
1404 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1407 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1409 /* Check if we have socket listening on psm */
1410 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1412 result = L2CAP_CR_BAD_PSM;
1416 result = L2CAP_CR_NO_MEM;
1418 /* Check for backlog size */
1419 if (sk_acceptq_is_full(parent)) {
1420 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1424 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC)
1428 write_lock(&list->lock);
1430 /* Check if we already have channel with that dcid */
1431 if (__l2cap_get_chan_by_dcid(list, scid)) {
1432 write_unlock(&list->lock);
1433 sock_set_flag(sk, SOCK_ZAPPED);
1434 l2cap_sock_kill(sk);
/* Pin the ACL link while the channel exists. */
1438 hci_conn_hold(conn->hcon);
1440 l2cap_sock_init(sk, parent);
1441 bacpy(&bt_sk(sk)->src, conn->src);
1442 bacpy(&bt_sk(sk)->dst, conn->dst);
1443 l2cap_pi(sk)->psm = psm;
/* Peer's source CID becomes our destination CID. */
1444 l2cap_pi(sk)->dcid = scid;
1446 __l2cap_chan_add(conn, sk, parent);
1447 dcid = l2cap_pi(sk)->scid;
1449 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1451 /* Service level security */
1452 result = L2CAP_CR_PEND;
1453 status = L2CAP_CS_AUTHEN_PEND;
1454 sk->sk_state = BT_CONNECT2;
1455 l2cap_pi(sk)->ident = cmd->ident;
/* Defer the final response while encryption/authentication runs. */
1457 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1458 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1459 if (!hci_conn_encrypt(conn->hcon))
1461 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1462 if (!hci_conn_auth(conn->hcon))
/* No security required: accept immediately and move to config. */
1466 sk->sk_state = BT_CONFIG;
1467 result = status = 0;
1470 write_unlock(&list->lock);
1473 bh_unlock_sock(parent);
1476 rsp.scid = __cpu_to_le16(scid);
1477 rsp.dcid = __cpu_to_le16(dcid);
1478 rsp.result = __cpu_to_le16(result);
1479 rsp.status = __cpu_to_le16(status);
1480 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Handle a Connect Response to our earlier request: on success, record
 * the peer's CID and start configuration; on refusal, tear the channel
 * down with ECONNREFUSED.  NOTE(review): case labels and some branch
 * lines were dropped in extraction — verify against the full source. */
1484 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1486 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1487 u16 scid, dcid, result, status;
1491 scid = __le16_to_cpu(rsp->scid);
1492 dcid = __le16_to_cpu(rsp->dcid);
1493 result = __le16_to_cpu(rsp->result);
1494 status = __le16_to_cpu(rsp->status);
1496 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Look up by our SCID; fall back to the command ident when the peer
 * did not echo a valid SCID. */
1499 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1502 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1507 case L2CAP_CR_SUCCESS:
1508 sk->sk_state = BT_CONFIG;
1509 l2cap_pi(sk)->ident = 0;
1510 l2cap_pi(sk)->dcid = dcid;
1511 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1513 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1514 l2cap_build_conf_req(sk, req), req);
/* Refused (non-pending result): drop the channel. */
1521 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configure Request: parse the peer's options, respond (empty
 * response for a continuation flag, full evaluation otherwise), and if
 * both directions are configured, bring the channel up; otherwise send
 * our own Configure Request if not sent yet.  NOTE(review): some lines
 * were dropped in extraction — verify against the full source. */
1529 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1531 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1537 dcid = __le16_to_cpu(req->dcid);
1538 flags = __le16_to_cpu(req->flags);
1540 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1542 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1545 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1547 if (flags & 0x0001) {
1548 /* Incomplete config. Send empty response. */
1549 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1550 l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1554 /* Complete config. */
1555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1556 l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1561 /* Output config done */
1562 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1564 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1565 sk->sk_state = BT_CONNECTED;
1566 l2cap_chan_ready(sk);
1567 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1569 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1570 l2cap_build_conf_req(sk, req), req);
/* Handle a Configure Response.  UNACCEPT: resend the same request up to
 * L2CAP_CONF_MAX_RETRIES, then give up by sending a Disconnect Request.
 * On success with output config already done, the channel becomes
 * connected.  NOTE(review): case labels and some branch lines were
 * dropped in extraction — verify against the full source. */
1578 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1580 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1581 u16 scid, flags, result;
1584 scid = __le16_to_cpu(rsp->scid);
1585 flags = __le16_to_cpu(rsp->flags);
1586 result = __le16_to_cpu(rsp->result);
1588 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1590 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1594 case L2CAP_CONF_SUCCESS:
1597 case L2CAP_CONF_UNACCEPT:
1598 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1600 /* It does not make sense to adjust L2CAP parameters
1601 * that are currently defined in the spec. We simply
1602 * resend config request that we sent earlier. It is
1603 * stupid, but it helps qualification testing which
1604 * expects at least some response from us. */
1605 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1606 l2cap_build_conf_req(sk, req), req);
/* Retries exhausted: abort the channel with a disconnect. */
1611 sk->sk_state = BT_DISCONN;
1612 sk->sk_err = ECONNRESET;
1613 l2cap_sock_set_timer(sk, HZ * 5);
1615 struct l2cap_disconn_req req;
1616 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1617 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1618 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1619 L2CAP_DISCONN_REQ, sizeof(req), &req);
1627 /* Input config done */
1628 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1630 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1631 sk->sk_state = BT_CONNECTED;
1632 l2cap_chan_ready(sk);
/* Handle a Disconnect Request from the peer: acknowledge with a
 * Disconnect Response, then tear down and kill the channel with
 * ECONNRESET. */
1640 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1642 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1643 struct l2cap_disconn_rsp rsp;
1647 scid = __le16_to_cpu(req->scid);
1648 dcid = __le16_to_cpu(req->dcid);
1650 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's DCID is our SCID — look the channel up by it. */
1652 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1655 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1656 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1657 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1659 sk->sk_shutdown = SHUTDOWN_MASK;
1661 l2cap_chan_del(sk, ECONNRESET);
1664 l2cap_sock_kill(sk);
/* Handle a Disconnect Response to our earlier request: the handshake is
 * complete, so delete and kill the channel without error. */
1668 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1670 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1674 scid = __le16_to_cpu(rsp->scid);
1675 dcid = __le16_to_cpu(rsp->dcid);
1677 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1679 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1682 l2cap_chan_del(sk, 0);
1685 l2cap_sock_kill(sk);
/* Handle an L2CAP Information Request. This stack implements no
 * optional features, so every query is answered with a
 * "not supported" Information Response echoing the requested type. */
1689 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1691 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1692 struct l2cap_info_rsp rsp;
1695 type = __le16_to_cpu(req->type);
1697 BT_DBG("type 0x%4.4x", type);
/* Echo the type back; result is always NOTSUPP. */
1699 rsp.type = __cpu_to_le16(type);
1700 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1701 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP Information Response. We only log the reported
 * type/result; nothing in the visible code acts on them. */
1706 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1708 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1711 type = __le16_to_cpu(rsp->type);
1712 result = __le16_to_cpu(rsp->result);
1714 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Parse and dispatch the signalling commands carried in one frame on
 * the L2CAP signalling channel. A frame may hold several commands back
 * to back; each command header is copied out, validated, and routed to
 * its handler. Raw L2CAP sockets get a copy of the whole frame first.
 * If a handler reports an error, a Command Reject is sent back to the
 * peer (reason mapping is still a FIXME). */
1719 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1721 u8 *data = skb->data;
1723 struct l2cap_cmd_hdr cmd;
/* Mirror signalling traffic to any raw sockets. */
1726 l2cap_raw_recv(conn, skb);
1728 while (len >= L2CAP_CMD_HDR_SIZE) {
1729 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1730 data += L2CAP_CMD_HDR_SIZE;
1731 len -= L2CAP_CMD_HDR_SIZE;
1733 cmd.len = __le16_to_cpu(cmd.len);
1735 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
/* A command claiming more payload than remains in the frame, or
 * carrying the reserved identifier 0, is malformed. */
1737 if (cmd.len > len || !cmd.ident) {
1738 BT_DBG("corrupted command");
1743 case L2CAP_COMMAND_REJ:
1744 /* FIXME: We should process this */
1747 case L2CAP_CONN_REQ:
1748 err = l2cap_connect_req(conn, &cmd, data);
1751 case L2CAP_CONN_RSP:
1752 err = l2cap_connect_rsp(conn, &cmd, data);
1755 case L2CAP_CONF_REQ:
1756 err = l2cap_config_req(conn, &cmd, data);
1759 case L2CAP_CONF_RSP:
1760 err = l2cap_config_rsp(conn, &cmd, data);
1763 case L2CAP_DISCONN_REQ:
1764 err = l2cap_disconnect_req(conn, &cmd, data);
1767 case L2CAP_DISCONN_RSP:
1768 err = l2cap_disconnect_rsp(conn, &cmd, data);
1771 case L2CAP_ECHO_REQ:
/* Echo Request: bounce the payload straight back to the peer. */
1772 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1775 case L2CAP_ECHO_RSP:
1778 case L2CAP_INFO_REQ:
1779 err = l2cap_information_req(conn, &cmd, data);
1782 case L2CAP_INFO_RSP:
1783 err = l2cap_information_rsp(conn, &cmd, data);
1787 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: reject the command so the peer is not left waiting. */
1793 struct l2cap_cmd_rej rej;
1794 BT_DBG("error %d", err);
1796 /* FIXME: Map err to a valid reason */
1797 rej.reason = __cpu_to_le16(0);
1798 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the socket that owns the
 * destination CID. The frame is dropped if the channel is unknown, the
 * channel is not in BT_CONNECTED state, the frame exceeds the incoming
 * MTU, or the socket receive buffer is full (see the original comment
 * below on why the last case is unavoidable). */
1808 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1812 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1814 BT_DBG("unknown cid 0x%4.4x", cid);
1818 BT_DBG("sk %p, len %d", sk, skb->len);
1820 if (sk->sk_state != BT_CONNECTED)
/* Enforce the negotiated incoming MTU. */
1823 if (l2cap_pi(sk)->imtu < skb->len)
1826 /* If socket recv buffers overflows we drop data here
1827 * which is *bad* because L2CAP has to be reliable.
1828 * But we don't have any other choice. L2CAP doesn't
1829 * provide flow control mechanism. */
1831 if (!sock_queue_rcv_skb(sk, skb))
/* NOTE(review): the unlock implies the lookup above returned the
 * socket locked — confirm in l2cap_get_chan_by_scid(). */
1838 if (sk) bh_unlock_sock(sk);
/* Deliver a connectionless (PSM-addressed) frame to a socket bound to
 * that PSM on the local address. Accepts sockets in BT_BOUND as well
 * as BT_CONNECTED state; drops frames that exceed the incoming MTU or
 * cannot be queued. */
1842 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1846 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1850 BT_DBG("sk %p, len %d", sk, skb->len);
1852 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1855 if (l2cap_pi(sk)->imtu < skb->len)
1858 if (!sock_queue_rcv_skb(sk, skb))
/* NOTE(review): unlock implies the PSM lookup locked the socket —
 * confirm in l2cap_get_sock_by_psm(). */
1865 if (sk) bh_unlock_sock(sk);
/* Demultiplex one complete, reassembled L2CAP frame by destination
 * CID: signalling channel, connectionless channel (addressed by the
 * PSM at the start of the payload), or a connection-oriented data
 * channel. The CID comparisons themselves are elided in this view. */
1869 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1871 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header; cid/len are little-endian. */
1874 skb_pull(skb, L2CAP_HDR_SIZE);
1875 cid = __le16_to_cpu(lh->cid);
1876 len = __le16_to_cpu(lh->len);
1878 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1882 l2cap_sig_channel(conn, skb);
/* PSM may sit at an unaligned offset in the payload; it is passed on
 * raw (no byte-order conversion here) — presumably matching how PSMs
 * are stored for l2cap_get_sock_by_psm(); confirm. */
1886 psm = get_unaligned((u16 *) skb->data);
1888 l2cap_conless_channel(conn, psm, skb);
1892 l2cap_data_channel(conn, cid, skb);
1897 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: incoming ACL connection indication from bdaddr.
 * Walks all listening L2CAP sockets and accumulates an accept mask
 * plus link-mode bits, preferring sockets bound exactly to this
 * adapter's address over BDADDR_ANY wildcards. Non-ACL link types are
 * not handled (early bail-out elided in this view). */
1899 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1901 int exact = 0, lm1 = 0, lm2 = 0;
1902 register struct sock *sk;
1903 struct hlist_node *node;
1905 if (type != ACL_LINK)
1908 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1910 /* Find listening sockets and check their link_mode */
1911 read_lock(&l2cap_sk_list.lock);
1912 sk_for_each(sk, node, &l2cap_sk_list.head) {
1913 if (sk->sk_state != BT_LISTEN)
/* lm1 collects modes of exact-address matches, lm2 of wildcards. */
1916 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1917 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1919 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1920 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1922 read_unlock(&l2cap_sk_list.lock);
/* Exact binds take precedence over wildcard binds. */
1924 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming ACL connection attempt finished.
 * On success attach an l2cap_conn to the hci_conn and kick any
 * channels waiting on it; on failure tear the L2CAP state down with
 * the HCI status mapped to an errno. Ignores non-ACL links. */
1927 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1929 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1931 if (hcon->type != ACL_LINK)
1935 struct l2cap_conn *conn;
1937 conn = l2cap_conn_add(hcon, status);
1939 l2cap_conn_ready(conn);
/* Failure path: bt_err() maps the HCI status code to an errno. */
1941 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the underlying ACL link went down. Tear down the
 * whole L2CAP connection with the mapped errno; non-ACL links are
 * ignored. */
1946 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1948 BT_DBG("hcon %p reason %d", hcon, reason);
1950 if (hcon->type != ACL_LINK)
1953 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication for this ACL link completed. Walk the
 * connection's channel list and finish the deferred Connect Response
 * for channels parked in BT_CONNECT2 waiting on authentication.
 * Channels that additionally require encryption or a secure link are
 * skipped here (they are completed by l2cap_encrypt_cfm instead). */
1957 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1959 struct l2cap_chan_list *l;
1960 struct l2cap_conn *conn;
1961 struct l2cap_conn_rsp rsp;
1965 if (!(conn = hcon->l2cap_data))
1967 l = &conn->chan_list;
1969 BT_DBG("conn %p", conn);
1971 read_lock(&l->lock);
1973 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1976 if (sk->sk_state != BT_CONNECT2 ||
1977 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1978 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
/* Success branch (status test elided in this view): proceed to
 * configuration. */
1984 sk->sk_state = BT_CONFIG;
/* Failure branch: answer "security block" and schedule disconnect. */
1987 sk->sk_state = BT_DISCONN;
1988 l2cap_sock_set_timer(sk, HZ/10);
1989 result = L2CAP_CR_SEC_BLOCK;
/* Send the Connect Response that was deferred pending security. */
1992 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1993 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1994 rsp.result = __cpu_to_le16(result);
1995 rsp.status = __cpu_to_le16(0);
1996 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
1997 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2002 read_unlock(&l->lock);
/* HCI callback: link encryption change completed. Like
 * l2cap_auth_cfm(), finish the deferred Connect Response for channels
 * waiting in BT_CONNECT2, and additionally request a link key change
 * for channels demanding a secure link. */
2006 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2008 struct l2cap_chan_list *l;
2009 struct l2cap_conn *conn;
2010 struct l2cap_conn_rsp rsp;
2014 if (!(conn = hcon->l2cap_data))
2016 l = &conn->chan_list;
2018 BT_DBG("conn %p", conn);
2020 read_lock(&l->lock);
2022 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only channels still waiting for the security result matter. */
2025 if (sk->sk_state != BT_CONNECT2) {
/* Success branch (status test elided in this view). */
2031 sk->sk_state = BT_CONFIG;
/* Failure branch: "security block" and delayed disconnect. */
2034 sk->sk_state = BT_DISCONN;
2035 l2cap_sock_set_timer(sk, HZ/10);
2036 result = L2CAP_CR_SEC_BLOCK;
2039 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
2040 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
2041 rsp.result = __cpu_to_le16(result);
2042 rsp.status = __cpu_to_le16(0);
2043 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2044 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Secure-mode channels also force a fresh link key. */
2046 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2047 hci_conn_change_link_key(hcon);
2052 read_unlock(&l->lock);
/* HCI callback: one ACL data fragment arrived. Reassembles L2CAP
 * frames that span several ACL packets. An ACL_START fragment carries
 * the L2CAP header, from which the total frame length is computed; a
 * complete frame is dispatched immediately, otherwise an rx_skb is
 * allocated and continuation fragments are appended until rx_len hits
 * zero. Any length inconsistency marks the connection unreliable
 * (ECOMM) and discards the partial frame. */
2056 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2058 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the l2cap_conn on first inbound data. */
2060 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2063 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2065 if (flags & ACL_START) {
2066 struct l2cap_hdr *hdr;
/* A new start frame while rx_skb is pending means the previous
 * frame was never completed: drop it. */
2070 BT_ERR("Unexpected start frame (len %d)", skb->len);
2071 kfree_skb(conn->rx_skb);
2072 conn->rx_skb = NULL;
2074 l2cap_conn_unreliable(conn, ECOMM);
/* Start frame must at least hold the L2CAP header. */
2078 BT_ERR("Frame is too short (len %d)", skb->len);
2079 l2cap_conn_unreliable(conn, ECOMM);
2083 hdr = (struct l2cap_hdr *) skb->data;
2084 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2086 if (len == skb->len) {
2087 /* Complete frame received */
2088 l2cap_recv_frame(conn, skb);
2092 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2094 if (skb->len > len) {
2095 BT_ERR("Frame is too long (len %d, expected len %d)",
2097 l2cap_conn_unreliable(conn, ECOMM);
2101 /* Allocate skb for the complete frame (with header) */
2102 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2105 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
/* Bytes still expected before the frame is complete. */
2106 conn->rx_len = len - skb->len;
2108 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
2110 if (!conn->rx_len) {
2111 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2112 l2cap_conn_unreliable(conn, ECOMM);
2116 if (skb->len > conn->rx_len) {
2117 BT_ERR("Fragment is too long (len %d, expected %d)",
2118 skb->len, conn->rx_len);
2119 kfree_skb(conn->rx_skb);
2120 conn->rx_skb = NULL;
2122 l2cap_conn_unreliable(conn, ECOMM);
2126 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2127 conn->rx_len -= skb->len;
2129 if (!conn->rx_len) {
2130 /* Complete frame received */
2131 l2cap_recv_frame(conn, conn->rx_skb);
2132 conn->rx_skb = NULL;
2141 /* ---- Proc fs support ---- */
2142 #ifdef CONFIG_PROC_FS
/* seq_file iterator over the global L2CAP socket list.
 * start() takes the list lock (released again in stop()) and walks to
 * the requested position; most iterator bodies are elided in this
 * view. */
2143 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2146 struct hlist_node *node;
2149 read_lock_bh(&l2cap_sk_list.lock);
2151 sk_for_each(sk, node, &l2cap_sk_list.head)
/* next(): advance to the following socket (body elided). */
2159 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
/* stop(): drop the lock taken in l2cap_seq_start(). */
2165 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2167 read_unlock_bh(&l2cap_sk_list.lock);
2170 static int l2cap_seq_show(struct seq_file *seq, void *e)
2172 struct sock *sk = e;
2173 struct l2cap_pinfo *pi = l2cap_pi(sk);
2175 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2176 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2177 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2178 pi->omtu, pi->link_mode);
/* seq_file operation table wiring the iterator callbacks above. */
2182 static struct seq_operations l2cap_seq_ops = {
2183 .start = l2cap_seq_start,
2184 .next = l2cap_seq_next,
2185 .stop = l2cap_seq_stop,
2186 .show = l2cap_seq_show
/* open(): standard seq_file open against l2cap_seq_ops. */
2189 static int l2cap_seq_open(struct inode *inode, struct file *file)
2191 return seq_open(file, &l2cap_seq_ops);
/* File operations for the proc entry; read is elided in this view
 * (presumably seq_read — confirm against the original file). */
2194 static struct file_operations l2cap_seq_fops = {
2195 .owner = THIS_MODULE,
2196 .open = l2cap_seq_open,
2198 .llseek = seq_lseek,
2199 .release = seq_release,
/* Create the read-only "l2cap" entry under the bluetooth proc dir and
 * hook up the seq_file operations. */
2202 static int __init l2cap_proc_init(void)
2204 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2207 p->owner = THIS_MODULE;
2208 p->proc_fops = &l2cap_seq_fops;
/* Remove the proc entry on module unload. */
2212 static void __exit l2cap_proc_cleanup(void)
2214 remove_proc_entry("l2cap", proc_bt);
2217 #else /* CONFIG_PROC_FS */
/* procfs disabled: provide no-op stubs so callers need no #ifdefs. */
2219 static int __init l2cap_proc_init(void)
2224 static void __exit l2cap_proc_cleanup(void)
2228 #endif /* CONFIG_PROC_FS */
/* proto_ops for L2CAP sockets. recvmsg and poll are delegated to the
 * common Bluetooth socket layer; mmap, socketpair and ioctl are not
 * supported. */
2230 static struct proto_ops l2cap_sock_ops = {
2231 .family = PF_BLUETOOTH,
2232 .owner = THIS_MODULE,
2233 .release = l2cap_sock_release,
2234 .bind = l2cap_sock_bind,
2235 .connect = l2cap_sock_connect,
2236 .listen = l2cap_sock_listen,
2237 .accept = l2cap_sock_accept,
2238 .getname = l2cap_sock_getname,
2239 .sendmsg = l2cap_sock_sendmsg,
2240 .recvmsg = bt_sock_recvmsg,
2241 .poll = bt_sock_poll,
2242 .mmap = sock_no_mmap,
2243 .socketpair = sock_no_socketpair,
2244 .ioctl = sock_no_ioctl,
2245 .shutdown = l2cap_sock_shutdown,
2246 .setsockopt = l2cap_sock_setsockopt,
2247 .getsockopt = l2cap_sock_getsockopt
/* Socket-family hook: how new PF_BLUETOOTH/BTPROTO_L2CAP sockets are
 * created (registered via bt_sock_register in l2cap_init). */
2250 static struct net_proto_family l2cap_sock_family_ops = {
2251 .family = PF_BLUETOOTH,
2252 .owner = THIS_MODULE,
2253 .create = l2cap_sock_create,
/* Callbacks registered with the HCI core: connection setup/teardown
 * indications, auth/encrypt completion, and inbound ACL data. */
2256 static struct hci_proto l2cap_hci_proto = {
2258 .id = HCI_PROTO_L2CAP,
2259 .connect_ind = l2cap_connect_ind,
2260 .connect_cfm = l2cap_connect_cfm,
2261 .disconn_ind = l2cap_disconn_ind,
2262 .auth_cfm = l2cap_auth_cfm,
2263 .encrypt_cfm = l2cap_encrypt_cfm,
2264 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the PF_BLUETOOTH socket family
 * entry and the HCI protocol hooks, unwinding in reverse order when a
 * later step fails (error-path labels are elided in this view). */
2267 static int __init l2cap_init(void)
2271 err = proto_register(&l2cap_proto, 0);
2275 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2277 BT_ERR("L2CAP socket registration failed");
2281 err = hci_register_proto(&l2cap_hci_proto);
2283 BT_ERR("L2CAP protocol registration failed");
/* HCI registration failed: undo the socket-family registration. */
2284 bt_sock_unregister(BTPROTO_L2CAP);
2290 BT_INFO("L2CAP ver %s", VERSION);
2291 BT_INFO("L2CAP socket layer initialized");
/* Common error exit: drop the proto registration. */
2296 proto_unregister(&l2cap_proto);
/* Module exit: tear everything down in reverse order of l2cap_init,
 * logging (but not failing on) unregistration errors. */
2300 static void __exit l2cap_exit(void)
2302 l2cap_proc_cleanup();
2304 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2305 BT_ERR("L2CAP socket unregistration failed");
2307 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2308 BT_ERR("L2CAP protocol unregistration failed");
2310 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules pull
 * in this module via the normal symbol-resolution mechanism. */
2313 void l2cap_load(void)
2315 /* Dummy function to trigger automatic L2CAP module loading by
2316 * other modules that use L2CAP sockets but don't use any other
2317 * symbols from it. */
2320 EXPORT_SYMBOL(l2cap_load);
/* Module entry points and metadata. The "bt-proto-0" alias presumably
 * maps the BTPROTO_L2CAP protocol number for request_module()-based
 * auto-loading — confirm against bt_sock_create(). */
2322 module_init(l2cap_init);
2323 module_exit(l2cap_exit);
2325 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2326 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2327 MODULE_VERSION(VERSION);
2328 MODULE_LICENSE("GPL");
2329 MODULE_ALIAS("bt-proto-0");