/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core and sockets. */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#ifndef CONFIG_BT_L2CAP_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif
static const struct proto_ops l2cap_sock_ops;

static struct bt_sock_list l2cap_sk_list = {
	.lock = RW_LOCK_UNLOCKED
};

static int l2cap_conn_del(struct hci_conn *conn, int err);

static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
static void l2cap_chan_del(struct sock *sk, int err);

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	__l2cap_sock_close(sk, ETIMEDOUT);
}

static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}

static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}

static void l2cap_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = l2cap_sock_timeout;
	sk->sk_timer.data = (unsigned long) sk;
}
/* ---- L2CAP connections ---- */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	if ((conn = hcon->l2cap_data))
		return conn;

	if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
		return NULL;
	memset(conn, 0, sizeof(struct l2cap_conn));

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	BT_DBG("hcon %p conn %p", hcon, conn);

	return conn;
}
static int l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn;
	struct sock *sk;

	if (!(conn = hcon->l2cap_data))
		return 0;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
	return 0;
}
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock(&l->lock);
}

static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return -ENOMEM;

	return hci_send_acl(conn->hcon, skb, 0);
}
/* ---- Socket interface ---- */
static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
{
	struct sock *sk;
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
			goto found;
	sk = NULL;
found:
	return sk;
}

/* Find socket with psm and source bdaddr.
 * Returns closest match. */
static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	return node ? sk : sk1;
}

/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s) bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
static void l2cap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket. */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_disconn_req req;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

			req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}

/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->link_mode = l2cap_pi(parent)->link_mode;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->link_mode = 0;
	}

	/* Default config options */
	pi->conf_mtu = L2CAP_DEFAULT_MTU;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
}
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};

static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	l2cap_sock_init_timer(sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
static int l2cap_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
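
/* The socket operations below implement bind/connect/listen/accept on top
 * of L2CAP channels. For reference only, a user-space client reaches this
 * code roughly like this (illustrative sketch, not part of this file; the
 * PSM value is hypothetical):
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *	addr.l2_family = AF_BLUETOOTH;
 *	bacpy(&addr.l2_bdaddr, BDADDR_ANY);
 *	addr.l2_psm = htobs(0x1001);
 *	bind(s, (struct sockaddr *) &addr, sizeof(addr));
 *
 * l2cap_sock_bind() below validates the address family, records the source
 * bdaddr and PSM, and moves the socket to BT_BOUND.
 */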
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
		l2cap_pi(sk)->psm   = la->l2_psm;
		l2cap_pi(sk)->sport = la->l2_psm;
		sk->sk_state = BT_BOUND;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);

	if (!(hdev = hci_get_route(dst, src)))
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	hcon = hci_connect(hdev, ACL_LINK, dst);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		} else {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		}
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	lock_sock(sk);

	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
		err = -EINVAL;
		goto done;
	}

	if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
	l2cap_pi(sk)->psm = la->l2_psm;

	if ((err = l2cap_do_connect(sk)))
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(psm, src)) {
				l2cap_pi(sk)->psm   = htobs(psm);
				l2cap_pi(sk)->sport = htobs(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock(sk);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

	if (peer)
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
	else
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);

	la->l2_psm = l2cap_pi(sk)->psm;
	return 0;
}
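
/* l2cap_do_send() below splits one outgoing message into ACL fragments:
 * the first fragment carries the L2CAP header (length + destination CID,
 * plus a 2-byte PSM for connectionless SOCK_DGRAM traffic) and is capped
 * at conn->mtu bytes; the remaining data is chained as headerless
 * continuation fragments on skb_shinfo(skb)->frag_list and handed to
 * hci_send_acl() as a single buffer. */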
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent = 0;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, len);

	/* First fragment (with L2CAP header) */
	if (sk->sk_type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;
	else
		hlen = L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = bt_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sk->sk_type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		err = -EFAULT;
		goto fail;
	}

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			goto fail;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
			err = -EFAULT;
			goto fail;
		}

		sent += count;
		len  -= count;

		frag = &(*frag)->next;
	}

	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
		goto fail;

	return sent;

fail:
	kfree_skb(skb);
	return err;
}
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Check outgoing MTU */
	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED)
		err = l2cap_do_send(sk, msg, len);
	else
		err = -ENOTCONN;

	release_sock(sk);
	return err;
}
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int err = 0, len;
	u32 opt;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->link_mode = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;
		break;

	case L2CAP_LM:
		if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}

static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->dcid == cid)
			break;
	}
	return s;
}

static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->scid == cid)
			break;
	}
	return s;
}

/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}

static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->ident == ident)
			break;
	}
	return s;
}

static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
{
	u16 cid = 0x0040;

	for (; cid < 0xffff; cid++) {
		if (!__l2cap_get_chan_by_scid(l, cid))
			return cid;
	}

	return 0;
}

static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock(&l->lock);
}
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = 0x0002;
		l2cap_pi(sk)->dcid = 0x0002;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = 0x0001;
		l2cap_pi(sk)->dcid = 0x0001;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
			sk->sk_err = err;
	}
	read_unlock(&l->lock);
}
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect. */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept. */
		parent->sk_data_ready(parent, 0);
	}
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
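
/* The helpers below build and parse L2CAP signalling PDUs. Every command
 * travels on the signalling channel (CID 0x0001) and starts with a command
 * header carrying code, ident and length; the ident value is what lets a
 * response be matched back to the request it answers. */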
/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = __cpu_to_le16(0x0001);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = __cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
		len  -= count;
	}

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((u16 *)opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((u32 *)opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
{
	int type, hint, olen;
	unsigned long val;
	void *ptr = data;

	BT_DBG("sk %p len %d", sk, len);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);

		hint  = type & 0x80;
		type &= 0x7f;

		switch (type) {
		case L2CAP_CONF_MTU:
			l2cap_pi(sk)->conf_mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			l2cap_pi(sk)->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		default:
			if (hint)
				break;

			/* FIXME: Reject unknown option */
			break;
		}
	}
}
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((u16 *) opt->val) = __cpu_to_le16(val);
		break;

	case 4:
		*((u32 *) opt->val) = __cpu_to_le32(val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//	l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = __cpu_to_le16(pi->dcid);
	req->flags = __cpu_to_le16(0);

	return ptr - data;
}
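
/* Configuration is negotiated in both directions: we send a CONF_REQ with
 * our incoming MTU and mark L2CAP_CONF_REQ_SENT, the peer's CONF_REQ is
 * answered through l2cap_conf_output()/l2cap_build_conf_rsp() below, and
 * the channel only becomes BT_CONNECTED once both L2CAP_CONF_INPUT_DONE
 * and L2CAP_CONF_OUTPUT_DONE are set in conf_state. */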
static inline int l2cap_conf_output(struct sock *sk, void **ptr)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int result = 0;

	/* Configure output options and let the other side know
	 * which ones we don't like. */
	if (pi->conf_mtu < pi->omtu) {
		l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
		result = L2CAP_CONF_UNACCEPT;
	} else {
		pi->omtu = pi->conf_mtu;
	}

	BT_DBG("sk %p result %d", sk, result);
	return result;
}
static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	u16 flags = 0;

	BT_DBG("sk %p complete %d", sk, result ? 1 : 0);

	if (result)
		*result = l2cap_conf_output(sk, &ptr);
	else
		flags = 0x0001;

	rsp->scid   = __cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = __cpu_to_le16(result ? *result : 0);
	rsp->flags  = __cpu_to_le16(flags);

	return ptr - data;
}
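
/* Incoming connection requests: find a socket listening on the requested
 * PSM, allocate a child socket for the new channel and answer with a
 * CONN_RSP carrying result/status. If the link mode asks for
 * authentication or encryption the response is left pending
 * (L2CAP_CR_PEND) until the HCI security callbacks complete. */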
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result = 0, status = 0;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	u16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm  = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Service level security */
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHEN_PEND;
	sk->sk_state = BT_CONNECT2;
	l2cap_pi(sk)->ident = cmd->ident;

	if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
			(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
		if (!hci_conn_encrypt(conn->hcon))
			goto done;
	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
		if (!hci_conn_auth(conn->hcon))
			goto done;
	}

	sk->sk_state = BT_CONFIG;
	result = status = 0;

done:
	write_unlock(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = __cpu_to_le16(scid);
	rsp.dcid   = __cpu_to_le16(dcid);
	rsp.result = __cpu_to_le16(result);
	rsp.status = __cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	char req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
			return 0;
	} else {
		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
		break;

	case L2CAP_CR_PEND:
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int result;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return -ENOENT;

	l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
		goto unlock;
	}

	/* Complete config. */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			l2cap_build_conf_rsp(sk, rsp, &result), rsp);

	if (result)
		goto unlock;

	/* Output config done */
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	} else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 req[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) data;
	u16 scid, flags, result;
	struct sock *sk;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
			char req[128];
			/* It does not make sense to adjust L2CAP parameters
			 * that are currently defined in the spec. We simply
			 * resend config request that we sent earlier. It is
			 * stupid, but it helps qualification testing which
			 * expects at least some response from us. */
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
			goto done;
		}
		/* fall through */

	default:
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		{
			struct l2cap_disconn_req req;
			req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		}
		goto done;
	}

	if (flags & 0x0001)
		goto done;

	/* Input config done */
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return 0;

	rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	struct l2cap_info_rsp rsp;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	rsp.type   = __cpu_to_le16(type);
	rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);

	return 0;
}

static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	return 0;
}
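
/* l2cap_sig_channel() below walks all commands packed into one signalling
 * frame, dispatches each one by its command code, and answers anything it
 * cannot handle with L2CAP_COMMAND_REJ so the remote side is not left
 * waiting. */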
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd.len = __le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);

		if (cmd.len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			/* FIXME: We should process this */
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd.len;
		len  -= cmd.len;
	}

	kfree_skb(skb);
}
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* If the socket recv buffer overflows we drop data here,
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice. L2CAP doesn't
	 * provide a flow control mechanism. */

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk) bh_unlock_sock(sk);
	return 0;
}

static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk) bh_unlock_sock(sk);
	return 0;
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, psm, len;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case 0x0001:
		l2cap_sig_channel(conn, skb);
		break;

	case 0x0002:
		psm = get_unaligned((u16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
/* ---- L2CAP interface with lower layer (HCI) ---- */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
			lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK)
		return 0;

	if (!status) {
		struct l2cap_conn *conn;

		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else
		l2cap_conn_del(hcon, bt_err(status));

	return 0;
}

static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return 0;

	l2cap_conn_del(hcon, bt_err(reason));
	return 0;
}
static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn;
	struct l2cap_conn_rsp rsp;
	struct sock *sk;
	int result;

	if (!(conn = hcon->l2cap_data))
		return 0;
	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_state != BT_CONNECT2 ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status) {
			sk->sk_state = BT_CONFIG;
			result = 0;
		} else {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ / 10);
			result = L2CAP_CR_SEC_BLOCK;
		}

		rsp.scid   = __cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid   = __cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = __cpu_to_le16(result);
		rsp.status = __cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
	return 0;
}
static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn;
	struct l2cap_conn_rsp rsp;
	struct sock *sk;
	int result;

	if (!(conn = hcon->l2cap_data))
		return 0;
	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_state != BT_CONNECT2) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status) {
			sk->sk_state = BT_CONFIG;
			result = 0;
		} else {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ / 10);
			result = L2CAP_CR_SEC_BLOCK;
		}

		rsp.scid   = __cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid   = __cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = __cpu_to_le16(result);
		rsp.status = __cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
			hci_conn_change_link_key(hcon);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
	return 0;
}
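
/* ACL reassembly: an L2CAP frame may arrive split over several HCI ACL
 * packets. The first packet (ACL_START) tells us the total L2CAP length;
 * if more is expected the partial frame is accumulated in
 * conn->rx_skb/rx_len and the complete frame is passed to
 * l2cap_recv_frame() once rx_len reaches zero. Any inconsistency drops
 * the partial frame and marks the connection unreliable. */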
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
			goto drop;

		memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
				pi->omtu, pi->link_mode);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return (str - buf);
}

static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= bt_sock_recvmsg,
	.poll		= bt_sock_poll,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.ioctl		= sock_no_ioctl,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

static struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.auth_cfm	= l2cap_auth_cfm,
	.encrypt_cfm	= l2cap_encrypt_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	class_create_file(&bt_class, &class_attr_l2cap);

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}

static void __exit l2cap_exit(void)
{
	class_remove_file(&bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}

void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
	return;
}
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");