2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
56 static int enable_ertm
= 0;
57 static int max_transmit
= L2CAP_DEFAULT_MAX_TX
;
59 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
60 static u8 l2cap_fixed_chan
[8] = { 0x02, };
62 static const struct proto_ops l2cap_sock_ops
;
64 static struct bt_sock_list l2cap_sk_list
= {
65 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
68 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
69 static void l2cap_sock_close(struct sock
*sk
);
70 static void l2cap_sock_kill(struct sock
*sk
);
72 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
73 u8 code
, u8 ident
, u16 dlen
, void *data
);
75 /* ---- L2CAP timers ---- */
76 static void l2cap_sock_timeout(unsigned long arg
)
78 struct sock
*sk
= (struct sock
*) arg
;
81 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
85 if (sk
->sk_state
== BT_CONNECTED
|| sk
->sk_state
== BT_CONFIG
)
86 reason
= ECONNREFUSED
;
87 else if (sk
->sk_state
== BT_CONNECT
&&
88 l2cap_pi(sk
)->sec_level
!= BT_SECURITY_SDP
)
89 reason
= ECONNREFUSED
;
93 __l2cap_sock_close(sk
, reason
);
101 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
103 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
104 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
107 static void l2cap_sock_clear_timer(struct sock
*sk
)
109 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
110 sk_stop_timer(sk
, &sk
->sk_timer
);
113 /* ---- L2CAP channels ---- */
114 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
117 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
118 if (l2cap_pi(s
)->dcid
== cid
)
124 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
127 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
128 if (l2cap_pi(s
)->scid
== cid
)
134 /* Find channel with given SCID.
135 * Returns locked socket */
136 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
140 s
= __l2cap_get_chan_by_scid(l
, cid
);
143 read_unlock(&l
->lock
);
147 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
150 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
151 if (l2cap_pi(s
)->ident
== ident
)
157 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
161 s
= __l2cap_get_chan_by_ident(l
, ident
);
164 read_unlock(&l
->lock
);
168 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
170 u16 cid
= L2CAP_CID_DYN_START
;
172 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
173 if (!__l2cap_get_chan_by_scid(l
, cid
))
180 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
185 l2cap_pi(l
->head
)->prev_c
= sk
;
187 l2cap_pi(sk
)->next_c
= l
->head
;
188 l2cap_pi(sk
)->prev_c
= NULL
;
192 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
194 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
196 write_lock_bh(&l
->lock
);
201 l2cap_pi(next
)->prev_c
= prev
;
203 l2cap_pi(prev
)->next_c
= next
;
204 write_unlock_bh(&l
->lock
);
209 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
211 struct l2cap_chan_list
*l
= &conn
->chan_list
;
213 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
214 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
216 conn
->disc_reason
= 0x13;
218 l2cap_pi(sk
)->conn
= conn
;
220 if (sk
->sk_type
== SOCK_SEQPACKET
) {
221 /* Alloc CID for connection-oriented socket */
222 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
223 } else if (sk
->sk_type
== SOCK_DGRAM
) {
224 /* Connectionless socket */
225 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
226 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
227 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
229 /* Raw socket can send/recv signalling messages only */
230 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
231 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
232 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
235 __l2cap_chan_link(l
, sk
);
238 bt_accept_enqueue(parent
, sk
);
242 * Must be called on the locked socket. */
243 static void l2cap_chan_del(struct sock
*sk
, int err
)
245 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
246 struct sock
*parent
= bt_sk(sk
)->parent
;
248 l2cap_sock_clear_timer(sk
);
250 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
253 /* Unlink from channel list */
254 l2cap_chan_unlink(&conn
->chan_list
, sk
);
255 l2cap_pi(sk
)->conn
= NULL
;
256 hci_conn_put(conn
->hcon
);
259 sk
->sk_state
= BT_CLOSED
;
260 sock_set_flag(sk
, SOCK_ZAPPED
);
266 bt_accept_unlink(sk
);
267 parent
->sk_data_ready(parent
, 0);
269 sk
->sk_state_change(sk
);
272 /* Service level security */
273 static inline int l2cap_check_security(struct sock
*sk
)
275 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
278 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
279 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
280 auth_type
= HCI_AT_NO_BONDING_MITM
;
282 auth_type
= HCI_AT_NO_BONDING
;
284 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
285 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
287 switch (l2cap_pi(sk
)->sec_level
) {
288 case BT_SECURITY_HIGH
:
289 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
291 case BT_SECURITY_MEDIUM
:
292 auth_type
= HCI_AT_GENERAL_BONDING
;
295 auth_type
= HCI_AT_NO_BONDING
;
300 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
304 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 129 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
314 spin_lock_bh(&conn
->lock
);
316 if (++conn
->tx_ident
> 128)
321 spin_unlock_bh(&conn
->lock
);
326 static inline int l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
328 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
330 BT_DBG("code 0x%2.2x", code
);
335 return hci_send_acl(conn
->hcon
, skb
, 0);
338 static inline int l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
341 struct l2cap_hdr
*lh
;
342 struct l2cap_conn
*conn
= pi
->conn
;
343 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
345 if (pi
->fcs
== L2CAP_FCS_CRC16
)
348 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
350 count
= min_t(unsigned int, conn
->mtu
, hlen
);
351 control
|= L2CAP_CTRL_FRAME_TYPE
;
353 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
357 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
358 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
359 lh
->cid
= cpu_to_le16(pi
->dcid
);
360 put_unaligned_le16(control
, skb_put(skb
, 2));
362 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
363 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
364 put_unaligned_le16(fcs
, skb_put(skb
, 2));
367 return hci_send_acl(pi
->conn
->hcon
, skb
, 0);
370 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
372 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
)
373 control
|= L2CAP_SUPER_RCV_NOT_READY
;
375 control
|= L2CAP_SUPER_RCV_READY
;
377 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
379 return l2cap_send_sframe(pi
, control
);
382 static void l2cap_do_start(struct sock
*sk
)
384 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
386 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
387 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
390 if (l2cap_check_security(sk
)) {
391 struct l2cap_conn_req req
;
392 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
393 req
.psm
= l2cap_pi(sk
)->psm
;
395 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
397 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
398 L2CAP_CONN_REQ
, sizeof(req
), &req
);
401 struct l2cap_info_req req
;
402 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
404 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
405 conn
->info_ident
= l2cap_get_ident(conn
);
407 mod_timer(&conn
->info_timer
, jiffies
+
408 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
410 l2cap_send_cmd(conn
, conn
->info_ident
,
411 L2CAP_INFO_REQ
, sizeof(req
), &req
);
415 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
)
417 struct l2cap_disconn_req req
;
419 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
420 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
421 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
422 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
425 /* ---- L2CAP connections ---- */
426 static void l2cap_conn_start(struct l2cap_conn
*conn
)
428 struct l2cap_chan_list
*l
= &conn
->chan_list
;
431 BT_DBG("conn %p", conn
);
435 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
438 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
443 if (sk
->sk_state
== BT_CONNECT
) {
444 if (l2cap_check_security(sk
)) {
445 struct l2cap_conn_req req
;
446 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
447 req
.psm
= l2cap_pi(sk
)->psm
;
449 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
451 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
452 L2CAP_CONN_REQ
, sizeof(req
), &req
);
454 } else if (sk
->sk_state
== BT_CONNECT2
) {
455 struct l2cap_conn_rsp rsp
;
456 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
457 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
459 if (l2cap_check_security(sk
)) {
460 if (bt_sk(sk
)->defer_setup
) {
461 struct sock
*parent
= bt_sk(sk
)->parent
;
462 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
463 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
464 parent
->sk_data_ready(parent
, 0);
467 sk
->sk_state
= BT_CONFIG
;
468 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
469 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
472 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
473 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
476 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
477 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
483 read_unlock(&l
->lock
);
486 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
488 struct l2cap_chan_list
*l
= &conn
->chan_list
;
491 BT_DBG("conn %p", conn
);
495 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
498 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
499 l2cap_sock_clear_timer(sk
);
500 sk
->sk_state
= BT_CONNECTED
;
501 sk
->sk_state_change(sk
);
502 } else if (sk
->sk_state
== BT_CONNECT
)
508 read_unlock(&l
->lock
);
511 /* Notify sockets that we cannot guaranty reliability anymore */
512 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
514 struct l2cap_chan_list
*l
= &conn
->chan_list
;
517 BT_DBG("conn %p", conn
);
521 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
522 if (l2cap_pi(sk
)->force_reliable
)
526 read_unlock(&l
->lock
);
529 static void l2cap_info_timeout(unsigned long arg
)
531 struct l2cap_conn
*conn
= (void *) arg
;
533 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
534 conn
->info_ident
= 0;
536 l2cap_conn_start(conn
);
539 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
541 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
546 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
550 hcon
->l2cap_data
= conn
;
553 BT_DBG("hcon %p conn %p", hcon
, conn
);
555 conn
->mtu
= hcon
->hdev
->acl_mtu
;
556 conn
->src
= &hcon
->hdev
->bdaddr
;
557 conn
->dst
= &hcon
->dst
;
561 spin_lock_init(&conn
->lock
);
562 rwlock_init(&conn
->chan_list
.lock
);
564 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
565 (unsigned long) conn
);
567 conn
->disc_reason
= 0x13;
572 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
574 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
580 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
582 kfree_skb(conn
->rx_skb
);
585 while ((sk
= conn
->chan_list
.head
)) {
587 l2cap_chan_del(sk
, err
);
592 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
593 del_timer_sync(&conn
->info_timer
);
595 hcon
->l2cap_data
= NULL
;
599 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
601 struct l2cap_chan_list
*l
= &conn
->chan_list
;
602 write_lock_bh(&l
->lock
);
603 __l2cap_chan_add(conn
, sk
, parent
);
604 write_unlock_bh(&l
->lock
);
607 /* ---- Socket interface ---- */
608 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
611 struct hlist_node
*node
;
612 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
613 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
620 /* Find socket with psm and source bdaddr.
621 * Returns closest match.
623 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
625 struct sock
*sk
= NULL
, *sk1
= NULL
;
626 struct hlist_node
*node
;
628 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
629 if (state
&& sk
->sk_state
!= state
)
632 if (l2cap_pi(sk
)->psm
== psm
) {
634 if (!bacmp(&bt_sk(sk
)->src
, src
))
638 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
642 return node
? sk
: sk1
;
645 /* Find socket with given address (psm, src).
646 * Returns locked socket */
647 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
650 read_lock(&l2cap_sk_list
.lock
);
651 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
654 read_unlock(&l2cap_sk_list
.lock
);
658 static void l2cap_sock_destruct(struct sock
*sk
)
662 skb_queue_purge(&sk
->sk_receive_queue
);
663 skb_queue_purge(&sk
->sk_write_queue
);
666 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
670 BT_DBG("parent %p", parent
);
672 /* Close not yet accepted channels */
673 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
674 l2cap_sock_close(sk
);
676 parent
->sk_state
= BT_CLOSED
;
677 sock_set_flag(parent
, SOCK_ZAPPED
);
680 /* Kill socket (only if zapped and orphan)
681 * Must be called on unlocked socket.
683 static void l2cap_sock_kill(struct sock
*sk
)
685 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
688 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
690 /* Kill poor orphan */
691 bt_sock_unlink(&l2cap_sk_list
, sk
);
692 sock_set_flag(sk
, SOCK_DEAD
);
696 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
698 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
700 switch (sk
->sk_state
) {
702 l2cap_sock_cleanup_listen(sk
);
707 if (sk
->sk_type
== SOCK_SEQPACKET
) {
708 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
710 sk
->sk_state
= BT_DISCONN
;
711 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
712 l2cap_send_disconn_req(conn
, sk
);
714 l2cap_chan_del(sk
, reason
);
718 if (sk
->sk_type
== SOCK_SEQPACKET
) {
719 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
720 struct l2cap_conn_rsp rsp
;
723 if (bt_sk(sk
)->defer_setup
)
724 result
= L2CAP_CR_SEC_BLOCK
;
726 result
= L2CAP_CR_BAD_PSM
;
728 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
729 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
730 rsp
.result
= cpu_to_le16(result
);
731 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
732 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
733 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
735 l2cap_chan_del(sk
, reason
);
740 l2cap_chan_del(sk
, reason
);
744 sock_set_flag(sk
, SOCK_ZAPPED
);
749 /* Must be called on unlocked socket. */
750 static void l2cap_sock_close(struct sock
*sk
)
752 l2cap_sock_clear_timer(sk
);
754 __l2cap_sock_close(sk
, ECONNRESET
);
759 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
761 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
766 sk
->sk_type
= parent
->sk_type
;
767 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
769 pi
->imtu
= l2cap_pi(parent
)->imtu
;
770 pi
->omtu
= l2cap_pi(parent
)->omtu
;
771 pi
->mode
= l2cap_pi(parent
)->mode
;
772 pi
->fcs
= l2cap_pi(parent
)->fcs
;
773 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
774 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
775 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
777 pi
->imtu
= L2CAP_DEFAULT_MTU
;
779 pi
->mode
= L2CAP_MODE_BASIC
;
780 pi
->fcs
= L2CAP_FCS_CRC16
;
781 pi
->sec_level
= BT_SECURITY_LOW
;
783 pi
->force_reliable
= 0;
786 /* Default config options */
788 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
789 skb_queue_head_init(TX_QUEUE(sk
));
790 skb_queue_head_init(SREJ_QUEUE(sk
));
791 INIT_LIST_HEAD(SREJ_LIST(sk
));
794 static struct proto l2cap_proto
= {
796 .owner
= THIS_MODULE
,
797 .obj_size
= sizeof(struct l2cap_pinfo
)
800 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
804 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
808 sock_init_data(sock
, sk
);
809 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
811 sk
->sk_destruct
= l2cap_sock_destruct
;
812 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
814 sock_reset_flag(sk
, SOCK_ZAPPED
);
816 sk
->sk_protocol
= proto
;
817 sk
->sk_state
= BT_OPEN
;
819 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
821 bt_sock_link(&l2cap_sk_list
, sk
);
825 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
830 BT_DBG("sock %p", sock
);
832 sock
->state
= SS_UNCONNECTED
;
834 if (sock
->type
!= SOCK_SEQPACKET
&&
835 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
836 return -ESOCKTNOSUPPORT
;
838 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
841 sock
->ops
= &l2cap_sock_ops
;
843 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
847 l2cap_sock_init(sk
, NULL
);
851 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
853 struct sock
*sk
= sock
->sk
;
854 struct sockaddr_l2 la
;
859 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
862 memset(&la
, 0, sizeof(la
));
863 len
= min_t(unsigned int, sizeof(la
), alen
);
864 memcpy(&la
, addr
, len
);
871 if (sk
->sk_state
!= BT_OPEN
) {
876 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
877 !capable(CAP_NET_BIND_SERVICE
)) {
882 write_lock_bh(&l2cap_sk_list
.lock
);
884 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
887 /* Save source address */
888 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
889 l2cap_pi(sk
)->psm
= la
.l2_psm
;
890 l2cap_pi(sk
)->sport
= la
.l2_psm
;
891 sk
->sk_state
= BT_BOUND
;
893 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
894 __le16_to_cpu(la
.l2_psm
) == 0x0003)
895 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
898 write_unlock_bh(&l2cap_sk_list
.lock
);
905 static int l2cap_do_connect(struct sock
*sk
)
907 bdaddr_t
*src
= &bt_sk(sk
)->src
;
908 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
909 struct l2cap_conn
*conn
;
910 struct hci_conn
*hcon
;
911 struct hci_dev
*hdev
;
915 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
918 hdev
= hci_get_route(dst
, src
);
920 return -EHOSTUNREACH
;
922 hci_dev_lock_bh(hdev
);
926 if (sk
->sk_type
== SOCK_RAW
) {
927 switch (l2cap_pi(sk
)->sec_level
) {
928 case BT_SECURITY_HIGH
:
929 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
931 case BT_SECURITY_MEDIUM
:
932 auth_type
= HCI_AT_DEDICATED_BONDING
;
935 auth_type
= HCI_AT_NO_BONDING
;
938 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
939 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
940 auth_type
= HCI_AT_NO_BONDING_MITM
;
942 auth_type
= HCI_AT_NO_BONDING
;
944 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
945 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
947 switch (l2cap_pi(sk
)->sec_level
) {
948 case BT_SECURITY_HIGH
:
949 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
951 case BT_SECURITY_MEDIUM
:
952 auth_type
= HCI_AT_GENERAL_BONDING
;
955 auth_type
= HCI_AT_NO_BONDING
;
960 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
961 l2cap_pi(sk
)->sec_level
, auth_type
);
965 conn
= l2cap_conn_add(hcon
, 0);
973 /* Update source addr of the socket */
974 bacpy(src
, conn
->src
);
976 l2cap_chan_add(conn
, sk
, NULL
);
978 sk
->sk_state
= BT_CONNECT
;
979 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
981 if (hcon
->state
== BT_CONNECTED
) {
982 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
983 l2cap_sock_clear_timer(sk
);
984 sk
->sk_state
= BT_CONNECTED
;
990 hci_dev_unlock_bh(hdev
);
995 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
997 struct sock
*sk
= sock
->sk
;
998 struct sockaddr_l2 la
;
1001 BT_DBG("sk %p", sk
);
1003 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
1006 memset(&la
, 0, sizeof(la
));
1007 len
= min_t(unsigned int, sizeof(la
), alen
);
1008 memcpy(&la
, addr
, len
);
1015 if (sk
->sk_type
== SOCK_SEQPACKET
&& !la
.l2_psm
) {
1020 switch (l2cap_pi(sk
)->mode
) {
1021 case L2CAP_MODE_BASIC
:
1023 case L2CAP_MODE_ERTM
:
1024 case L2CAP_MODE_STREAMING
:
1033 switch (sk
->sk_state
) {
1037 /* Already connecting */
1041 /* Already connected */
1054 /* Set destination address and psm */
1055 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1056 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1058 err
= l2cap_do_connect(sk
);
1063 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1064 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1070 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1072 struct sock
*sk
= sock
->sk
;
1075 BT_DBG("sk %p backlog %d", sk
, backlog
);
1079 if (sk
->sk_state
!= BT_BOUND
|| sock
->type
!= SOCK_SEQPACKET
) {
1084 switch (l2cap_pi(sk
)->mode
) {
1085 case L2CAP_MODE_BASIC
:
1087 case L2CAP_MODE_ERTM
:
1088 case L2CAP_MODE_STREAMING
:
1097 if (!l2cap_pi(sk
)->psm
) {
1098 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1103 write_lock_bh(&l2cap_sk_list
.lock
);
1105 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1106 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1107 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1108 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1113 write_unlock_bh(&l2cap_sk_list
.lock
);
1119 sk
->sk_max_ack_backlog
= backlog
;
1120 sk
->sk_ack_backlog
= 0;
1121 sk
->sk_state
= BT_LISTEN
;
1128 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1130 DECLARE_WAITQUEUE(wait
, current
);
1131 struct sock
*sk
= sock
->sk
, *nsk
;
1135 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1137 if (sk
->sk_state
!= BT_LISTEN
) {
1142 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1144 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1146 /* Wait for an incoming connection. (wake-one). */
1147 add_wait_queue_exclusive(sk
->sk_sleep
, &wait
);
1148 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1149 set_current_state(TASK_INTERRUPTIBLE
);
1156 timeo
= schedule_timeout(timeo
);
1157 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1159 if (sk
->sk_state
!= BT_LISTEN
) {
1164 if (signal_pending(current
)) {
1165 err
= sock_intr_errno(timeo
);
1169 set_current_state(TASK_RUNNING
);
1170 remove_wait_queue(sk
->sk_sleep
, &wait
);
1175 newsock
->state
= SS_CONNECTED
;
1177 BT_DBG("new socket %p", nsk
);
1184 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1186 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1187 struct sock
*sk
= sock
->sk
;
1189 BT_DBG("sock %p, sk %p", sock
, sk
);
1191 addr
->sa_family
= AF_BLUETOOTH
;
1192 *len
= sizeof(struct sockaddr_l2
);
1195 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1196 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1197 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1199 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1200 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1201 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1207 static void l2cap_monitor_timeout(unsigned long arg
)
1209 struct sock
*sk
= (void *) arg
;
1213 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1214 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
);
1219 l2cap_pi(sk
)->retry_count
++;
1220 __mod_monitor_timer();
1222 control
= L2CAP_CTRL_POLL
;
1223 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1227 static void l2cap_retrans_timeout(unsigned long arg
)
1229 struct sock
*sk
= (void *) arg
;
1233 l2cap_pi(sk
)->retry_count
= 1;
1234 __mod_monitor_timer();
1236 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1238 control
= L2CAP_CTRL_POLL
;
1239 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1243 static void l2cap_drop_acked_frames(struct sock
*sk
)
1245 struct sk_buff
*skb
;
1247 while ((skb
= skb_peek(TX_QUEUE(sk
)))) {
1248 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1251 skb
= skb_dequeue(TX_QUEUE(sk
));
1254 l2cap_pi(sk
)->unacked_frames
--;
1257 if (!l2cap_pi(sk
)->unacked_frames
)
1258 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1263 static inline int l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1265 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1268 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1270 err
= hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1277 static int l2cap_streaming_send(struct sock
*sk
)
1279 struct sk_buff
*skb
, *tx_skb
;
1280 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1284 while ((skb
= sk
->sk_send_head
)) {
1285 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1287 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1288 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1289 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1291 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1292 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1293 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1296 err
= l2cap_do_send(sk
, tx_skb
);
1298 l2cap_send_disconn_req(pi
->conn
, sk
);
1302 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1304 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1305 sk
->sk_send_head
= NULL
;
1307 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1309 skb
= skb_dequeue(TX_QUEUE(sk
));
1315 static int l2cap_retransmit_frame(struct sock
*sk
, u8 tx_seq
)
1317 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1318 struct sk_buff
*skb
, *tx_skb
;
1322 skb
= skb_peek(TX_QUEUE(sk
));
1324 if (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1325 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1327 skb
= skb_queue_next(TX_QUEUE(sk
), skb
);
1331 if (pi
->remote_max_tx
&&
1332 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1333 l2cap_send_disconn_req(pi
->conn
, sk
);
1337 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1338 bt_cb(skb
)->retries
++;
1339 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1340 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1341 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1342 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1344 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1345 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1346 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1349 err
= l2cap_do_send(sk
, tx_skb
);
1351 l2cap_send_disconn_req(pi
->conn
, sk
);
/*
 * NOTE(review): this definition is fragmented in the chunk (several
 * original lines are missing), so comments annotate only what is visible.
 * Presumably it transmits queued I-frames under ERTM until the tx window
 * fills or the remote reports busy -- TODO confirm against full source.
 */
1359 static int l2cap_ertm_send(struct sock
*sk
)
1361 struct sk_buff
*skb
, *tx_skb
;
1362 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
/* Do not transmit while waiting for an F-bit response (WAIT_F). */
1366 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
1369 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
)) &&
1370 !(pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)) {
/* Give up on the channel once a frame has hit the peer's MaxTx limit. */
1372 if (pi
->remote_max_tx
&&
1373 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1374 l2cap_send_disconn_req(pi
->conn
, sk
);
1378 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1380 bt_cb(skb
)->retries
++;
1382 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1383 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1384 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1385 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1388 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
/*
 * NOTE(review): the FCS is computed from and written through skb->data
 * here, while the control field above and l2cap_retransmit_frame() use
 * tx_skb->data.  skb_clone() shares the data buffer so the bytes land in
 * the same place, but the mixed usage is inconsistent and fragile --
 * confirm and prefer tx_skb->data on both lines.
 */
1389 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1390 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1393 err
= l2cap_do_send(sk
, tx_skb
);
1395 l2cap_send_disconn_req(pi
->conn
, sk
);
/* Frame handed to HCI: (re)start the retransmission timer. */
1398 __mod_retrans_timer();
1400 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1401 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1403 pi
->unacked_frames
++;
1405 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1406 sk
->sk_send_head
= NULL
;
1408 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1414 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1416 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1417 struct sk_buff
**frag
;
1420 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
)) {
1427 /* Continuation fragments (no L2CAP header) */
1428 frag
= &skb_shinfo(skb
)->frag_list
;
1430 count
= min_t(unsigned int, conn
->mtu
, len
);
1432 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1435 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1441 frag
= &(*frag
)->next
;
1447 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1449 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1450 struct sk_buff
*skb
;
1451 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1452 struct l2cap_hdr
*lh
;
1454 BT_DBG("sk %p len %d", sk
, (int)len
);
1456 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1457 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1458 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1460 return ERR_PTR(-ENOMEM
);
1462 /* Create L2CAP header */
1463 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1464 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1465 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1466 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1468 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1469 if (unlikely(err
< 0)) {
1471 return ERR_PTR(err
);
1476 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1478 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1479 struct sk_buff
*skb
;
1480 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1481 struct l2cap_hdr
*lh
;
1483 BT_DBG("sk %p len %d", sk
, (int)len
);
1485 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1486 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1487 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1489 return ERR_PTR(-ENOMEM
);
1491 /* Create L2CAP header */
1492 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1493 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1494 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1496 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1497 if (unlikely(err
< 0)) {
1499 return ERR_PTR(err
);
1504 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1506 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1507 struct sk_buff
*skb
;
1508 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1509 struct l2cap_hdr
*lh
;
1511 BT_DBG("sk %p len %d", sk
, (int)len
);
1516 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1519 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1520 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1521 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1523 return ERR_PTR(-ENOMEM
);
1525 /* Create L2CAP header */
1526 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1527 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1528 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1529 put_unaligned_le16(control
, skb_put(skb
, 2));
1531 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1533 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1534 if (unlikely(err
< 0)) {
1536 return ERR_PTR(err
);
1539 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1540 put_unaligned_le16(0, skb_put(skb
, 2));
1542 bt_cb(skb
)->retries
= 0;
1546 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1548 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1549 struct sk_buff
*skb
;
1550 struct sk_buff_head sar_queue
;
1554 __skb_queue_head_init(&sar_queue
);
1555 control
= L2CAP_SDU_START
;
1556 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->max_pdu_size
, control
, len
);
1558 return PTR_ERR(skb
);
1560 __skb_queue_tail(&sar_queue
, skb
);
1561 len
-= pi
->max_pdu_size
;
1562 size
+=pi
->max_pdu_size
;
1568 if (len
> pi
->max_pdu_size
) {
1569 control
|= L2CAP_SDU_CONTINUE
;
1570 buflen
= pi
->max_pdu_size
;
1572 control
|= L2CAP_SDU_END
;
1576 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1578 skb_queue_purge(&sar_queue
);
1579 return PTR_ERR(skb
);
1582 __skb_queue_tail(&sar_queue
, skb
);
1587 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1588 if (sk
->sk_send_head
== NULL
)
1589 sk
->sk_send_head
= sar_queue
.next
;
1594 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1596 struct sock
*sk
= sock
->sk
;
1597 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1598 struct sk_buff
*skb
;
1602 BT_DBG("sock %p, sk %p", sock
, sk
);
1604 err
= sock_error(sk
);
1608 if (msg
->msg_flags
& MSG_OOB
)
1611 /* Check outgoing MTU */
1612 if (sk
->sk_type
== SOCK_SEQPACKET
&& pi
->mode
== L2CAP_MODE_BASIC
&&
1618 if (sk
->sk_state
!= BT_CONNECTED
) {
1623 /* Connectionless channel */
1624 if (sk
->sk_type
== SOCK_DGRAM
) {
1625 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1626 err
= l2cap_do_send(sk
, skb
);
1631 case L2CAP_MODE_BASIC
:
1632 /* Create a basic PDU */
1633 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1639 err
= l2cap_do_send(sk
, skb
);
1644 case L2CAP_MODE_ERTM
:
1645 case L2CAP_MODE_STREAMING
:
1646 /* Entire SDU fits into one PDU */
1647 if (len
<= pi
->max_pdu_size
) {
1648 control
= L2CAP_SDU_UNSEGMENTED
;
1649 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1654 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1655 if (sk
->sk_send_head
== NULL
)
1656 sk
->sk_send_head
= skb
;
1658 /* Segment SDU into multiples PDUs */
1659 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1664 if (pi
->mode
== L2CAP_MODE_STREAMING
)
1665 err
= l2cap_streaming_send(sk
);
1667 err
= l2cap_ertm_send(sk
);
1674 BT_DBG("bad state %1.1x", pi
->mode
);
1683 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1685 struct sock
*sk
= sock
->sk
;
1689 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1690 struct l2cap_conn_rsp rsp
;
1692 sk
->sk_state
= BT_CONFIG
;
1694 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1695 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1696 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1697 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1698 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1699 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1707 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1710 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1712 struct sock
*sk
= sock
->sk
;
1713 struct l2cap_options opts
;
1717 BT_DBG("sk %p", sk
);
1723 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1724 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1725 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1726 opts
.mode
= l2cap_pi(sk
)->mode
;
1727 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1729 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1730 if (copy_from_user((char *) &opts
, optval
, len
)) {
1735 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1736 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1737 l2cap_pi(sk
)->mode
= opts
.mode
;
1738 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1742 if (get_user(opt
, (u32 __user
*) optval
)) {
1747 if (opt
& L2CAP_LM_AUTH
)
1748 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1749 if (opt
& L2CAP_LM_ENCRYPT
)
1750 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1751 if (opt
& L2CAP_LM_SECURE
)
1752 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1754 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1755 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1767 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1769 struct sock
*sk
= sock
->sk
;
1770 struct bt_security sec
;
1774 BT_DBG("sk %p", sk
);
1776 if (level
== SOL_L2CAP
)
1777 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1779 if (level
!= SOL_BLUETOOTH
)
1780 return -ENOPROTOOPT
;
1786 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1791 sec
.level
= BT_SECURITY_LOW
;
1793 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1794 if (copy_from_user((char *) &sec
, optval
, len
)) {
1799 if (sec
.level
< BT_SECURITY_LOW
||
1800 sec
.level
> BT_SECURITY_HIGH
) {
1805 l2cap_pi(sk
)->sec_level
= sec
.level
;
1808 case BT_DEFER_SETUP
:
1809 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1814 if (get_user(opt
, (u32 __user
*) optval
)) {
1819 bt_sk(sk
)->defer_setup
= opt
;
1831 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
1833 struct sock
*sk
= sock
->sk
;
1834 struct l2cap_options opts
;
1835 struct l2cap_conninfo cinfo
;
1839 BT_DBG("sk %p", sk
);
1841 if (get_user(len
, optlen
))
1848 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1849 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1850 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1851 opts
.mode
= l2cap_pi(sk
)->mode
;
1852 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1854 len
= min_t(unsigned int, len
, sizeof(opts
));
1855 if (copy_to_user(optval
, (char *) &opts
, len
))
1861 switch (l2cap_pi(sk
)->sec_level
) {
1862 case BT_SECURITY_LOW
:
1863 opt
= L2CAP_LM_AUTH
;
1865 case BT_SECURITY_MEDIUM
:
1866 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
1868 case BT_SECURITY_HIGH
:
1869 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
1877 if (l2cap_pi(sk
)->role_switch
)
1878 opt
|= L2CAP_LM_MASTER
;
1880 if (l2cap_pi(sk
)->force_reliable
)
1881 opt
|= L2CAP_LM_RELIABLE
;
1883 if (put_user(opt
, (u32 __user
*) optval
))
1887 case L2CAP_CONNINFO
:
1888 if (sk
->sk_state
!= BT_CONNECTED
&&
1889 !(sk
->sk_state
== BT_CONNECT2
&&
1890 bt_sk(sk
)->defer_setup
)) {
1895 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
1896 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
1898 len
= min_t(unsigned int, len
, sizeof(cinfo
));
1899 if (copy_to_user(optval
, (char *) &cinfo
, len
))
1913 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1915 struct sock
*sk
= sock
->sk
;
1916 struct bt_security sec
;
1919 BT_DBG("sk %p", sk
);
1921 if (level
== SOL_L2CAP
)
1922 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
1924 if (level
!= SOL_BLUETOOTH
)
1925 return -ENOPROTOOPT
;
1927 if (get_user(len
, optlen
))
1934 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1939 sec
.level
= l2cap_pi(sk
)->sec_level
;
1941 len
= min_t(unsigned int, len
, sizeof(sec
));
1942 if (copy_to_user(optval
, (char *) &sec
, len
))
1947 case BT_DEFER_SETUP
:
1948 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1953 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
1967 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
1969 struct sock
*sk
= sock
->sk
;
1972 BT_DBG("sock %p, sk %p", sock
, sk
);
1978 if (!sk
->sk_shutdown
) {
1979 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1980 l2cap_sock_clear_timer(sk
);
1981 __l2cap_sock_close(sk
, 0);
1983 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
1984 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
1991 static int l2cap_sock_release(struct socket
*sock
)
1993 struct sock
*sk
= sock
->sk
;
1996 BT_DBG("sock %p, sk %p", sock
, sk
);
2001 err
= l2cap_sock_shutdown(sock
, 2);
2004 l2cap_sock_kill(sk
);
2008 static void l2cap_chan_ready(struct sock
*sk
)
2010 struct sock
*parent
= bt_sk(sk
)->parent
;
2012 BT_DBG("sk %p, parent %p", sk
, parent
);
2014 l2cap_pi(sk
)->conf_state
= 0;
2015 l2cap_sock_clear_timer(sk
);
2018 /* Outgoing channel.
2019 * Wake up socket sleeping on connect.
2021 sk
->sk_state
= BT_CONNECTED
;
2022 sk
->sk_state_change(sk
);
2024 /* Incoming channel.
2025 * Wake up socket sleeping on accept.
2027 parent
->sk_data_ready(parent
, 0);
2031 /* Copy frame to all raw sockets on that connection */
2032 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2034 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2035 struct sk_buff
*nskb
;
2038 BT_DBG("conn %p", conn
);
2040 read_lock(&l
->lock
);
2041 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2042 if (sk
->sk_type
!= SOCK_RAW
)
2045 /* Don't send frame to the socket it came from */
2048 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2052 if (sock_queue_rcv_skb(sk
, nskb
))
2055 read_unlock(&l
->lock
);
2058 /* ---- L2CAP signalling commands ---- */
2059 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2060 u8 code
, u8 ident
, u16 dlen
, void *data
)
2062 struct sk_buff
*skb
, **frag
;
2063 struct l2cap_cmd_hdr
*cmd
;
2064 struct l2cap_hdr
*lh
;
2067 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2068 conn
, code
, ident
, dlen
);
2070 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2071 count
= min_t(unsigned int, conn
->mtu
, len
);
2073 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2077 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2078 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2079 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2081 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2084 cmd
->len
= cpu_to_le16(dlen
);
2087 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2088 memcpy(skb_put(skb
, count
), data
, count
);
2094 /* Continuation fragments (no L2CAP header) */
2095 frag
= &skb_shinfo(skb
)->frag_list
;
2097 count
= min_t(unsigned int, conn
->mtu
, len
);
2099 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2103 memcpy(skb_put(*frag
, count
), data
, count
);
2108 frag
= &(*frag
)->next
;
2118 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2120 struct l2cap_conf_opt
*opt
= *ptr
;
2123 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2131 *val
= *((u8
*) opt
->val
);
2135 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2139 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2143 *val
= (unsigned long) opt
->val
;
2147 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2151 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2153 struct l2cap_conf_opt
*opt
= *ptr
;
2155 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2162 *((u8
*) opt
->val
) = val
;
2166 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2170 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2174 memcpy(opt
->val
, (void *) val
, len
);
2178 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2181 static inline void l2cap_ertm_init(struct sock
*sk
)
2183 l2cap_pi(sk
)->expected_ack_seq
= 0;
2184 l2cap_pi(sk
)->unacked_frames
= 0;
2185 l2cap_pi(sk
)->buffer_seq
= 0;
2186 l2cap_pi(sk
)->num_to_ack
= 0;
2188 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2189 l2cap_retrans_timeout
, (unsigned long) sk
);
2190 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2191 l2cap_monitor_timeout
, (unsigned long) sk
);
2193 __skb_queue_head_init(SREJ_QUEUE(sk
));
2196 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2198 u32 local_feat_mask
= l2cap_feat_mask
;
2200 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2203 case L2CAP_MODE_ERTM
:
2204 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2205 case L2CAP_MODE_STREAMING
:
2206 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2212 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2215 case L2CAP_MODE_STREAMING
:
2216 case L2CAP_MODE_ERTM
:
2217 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2221 return L2CAP_MODE_BASIC
;
2225 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2227 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2228 struct l2cap_conf_req
*req
= data
;
2229 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2230 void *ptr
= req
->data
;
2232 BT_DBG("sk %p", sk
);
2234 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2238 case L2CAP_MODE_STREAMING
:
2239 case L2CAP_MODE_ERTM
:
2240 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2241 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2242 l2cap_send_disconn_req(pi
->conn
, sk
);
2245 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2251 case L2CAP_MODE_BASIC
:
2252 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2253 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2256 case L2CAP_MODE_ERTM
:
2257 rfc
.mode
= L2CAP_MODE_ERTM
;
2258 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2259 rfc
.max_transmit
= max_transmit
;
2260 rfc
.retrans_timeout
= 0;
2261 rfc
.monitor_timeout
= 0;
2262 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2264 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2265 sizeof(rfc
), (unsigned long) &rfc
);
2267 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2270 if (pi
->fcs
== L2CAP_FCS_NONE
||
2271 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2272 pi
->fcs
= L2CAP_FCS_NONE
;
2273 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2277 case L2CAP_MODE_STREAMING
:
2278 rfc
.mode
= L2CAP_MODE_STREAMING
;
2280 rfc
.max_transmit
= 0;
2281 rfc
.retrans_timeout
= 0;
2282 rfc
.monitor_timeout
= 0;
2283 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2285 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2286 sizeof(rfc
), (unsigned long) &rfc
);
2288 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2291 if (pi
->fcs
== L2CAP_FCS_NONE
||
2292 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2293 pi
->fcs
= L2CAP_FCS_NONE
;
2294 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2299 /* FIXME: Need actual value of the flush timeout */
2300 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2301 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2303 req
->dcid
= cpu_to_le16(pi
->dcid
);
2304 req
->flags
= cpu_to_le16(0);
2309 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2311 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2312 struct l2cap_conf_rsp
*rsp
= data
;
2313 void *ptr
= rsp
->data
;
2314 void *req
= pi
->conf_req
;
2315 int len
= pi
->conf_len
;
2316 int type
, hint
, olen
;
2318 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2319 u16 mtu
= L2CAP_DEFAULT_MTU
;
2320 u16 result
= L2CAP_CONF_SUCCESS
;
2322 BT_DBG("sk %p", sk
);
2324 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2325 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2327 hint
= type
& L2CAP_CONF_HINT
;
2328 type
&= L2CAP_CONF_MASK
;
2331 case L2CAP_CONF_MTU
:
2335 case L2CAP_CONF_FLUSH_TO
:
2339 case L2CAP_CONF_QOS
:
2342 case L2CAP_CONF_RFC
:
2343 if (olen
== sizeof(rfc
))
2344 memcpy(&rfc
, (void *) val
, olen
);
2347 case L2CAP_CONF_FCS
:
2348 if (val
== L2CAP_FCS_NONE
)
2349 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2357 result
= L2CAP_CONF_UNKNOWN
;
2358 *((u8
*) ptr
++) = type
;
2363 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2367 case L2CAP_MODE_STREAMING
:
2368 case L2CAP_MODE_ERTM
:
2369 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2370 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2371 return -ECONNREFUSED
;
2374 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2379 if (pi
->mode
!= rfc
.mode
) {
2380 result
= L2CAP_CONF_UNACCEPT
;
2381 rfc
.mode
= pi
->mode
;
2383 if (pi
->num_conf_rsp
== 1)
2384 return -ECONNREFUSED
;
2386 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2387 sizeof(rfc
), (unsigned long) &rfc
);
2391 if (result
== L2CAP_CONF_SUCCESS
) {
2392 /* Configure output options and let the other side know
2393 * which ones we don't like. */
2395 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2396 result
= L2CAP_CONF_UNACCEPT
;
2399 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2401 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2404 case L2CAP_MODE_BASIC
:
2405 pi
->fcs
= L2CAP_FCS_NONE
;
2406 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2409 case L2CAP_MODE_ERTM
:
2410 pi
->remote_tx_win
= rfc
.txwin_size
;
2411 pi
->remote_max_tx
= rfc
.max_transmit
;
2412 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2414 rfc
.retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
2415 rfc
.monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
2417 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2419 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2420 sizeof(rfc
), (unsigned long) &rfc
);
2424 case L2CAP_MODE_STREAMING
:
2425 pi
->remote_tx_win
= rfc
.txwin_size
;
2426 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2428 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2430 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2431 sizeof(rfc
), (unsigned long) &rfc
);
2436 result
= L2CAP_CONF_UNACCEPT
;
2438 memset(&rfc
, 0, sizeof(rfc
));
2439 rfc
.mode
= pi
->mode
;
2442 if (result
== L2CAP_CONF_SUCCESS
)
2443 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2445 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2446 rsp
->result
= cpu_to_le16(result
);
2447 rsp
->flags
= cpu_to_le16(0x0000);
2452 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2454 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2455 struct l2cap_conf_req
*req
= data
;
2456 void *ptr
= req
->data
;
2459 struct l2cap_conf_rfc rfc
;
2461 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2463 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2464 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2467 case L2CAP_CONF_MTU
:
2468 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2469 *result
= L2CAP_CONF_UNACCEPT
;
2470 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2473 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2476 case L2CAP_CONF_FLUSH_TO
:
2478 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2482 case L2CAP_CONF_RFC
:
2483 if (olen
== sizeof(rfc
))
2484 memcpy(&rfc
, (void *)val
, olen
);
2486 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2487 rfc
.mode
!= pi
->mode
)
2488 return -ECONNREFUSED
;
2490 pi
->mode
= rfc
.mode
;
2493 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2494 sizeof(rfc
), (unsigned long) &rfc
);
2499 if (*result
== L2CAP_CONF_SUCCESS
) {
2501 case L2CAP_MODE_ERTM
:
2502 pi
->remote_tx_win
= rfc
.txwin_size
;
2503 pi
->retrans_timeout
= rfc
.retrans_timeout
;
2504 pi
->monitor_timeout
= rfc
.monitor_timeout
;
2505 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2507 case L2CAP_MODE_STREAMING
:
2508 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2513 req
->dcid
= cpu_to_le16(pi
->dcid
);
2514 req
->flags
= cpu_to_le16(0x0000);
2519 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2521 struct l2cap_conf_rsp
*rsp
= data
;
2522 void *ptr
= rsp
->data
;
2524 BT_DBG("sk %p", sk
);
2526 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2527 rsp
->result
= cpu_to_le16(result
);
2528 rsp
->flags
= cpu_to_le16(flags
);
2533 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2535 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2537 if (rej
->reason
!= 0x0000)
2540 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2541 cmd
->ident
== conn
->info_ident
) {
2542 del_timer(&conn
->info_timer
);
2544 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2545 conn
->info_ident
= 0;
2547 l2cap_conn_start(conn
);
2553 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2555 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2556 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2557 struct l2cap_conn_rsp rsp
;
2558 struct sock
*sk
, *parent
;
2559 int result
, status
= L2CAP_CS_NO_INFO
;
2561 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2562 __le16 psm
= req
->psm
;
2564 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2566 /* Check if we have socket listening on psm */
2567 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2569 result
= L2CAP_CR_BAD_PSM
;
2573 /* Check if the ACL is secure enough (if not SDP) */
2574 if (psm
!= cpu_to_le16(0x0001) &&
2575 !hci_conn_check_link_mode(conn
->hcon
)) {
2576 conn
->disc_reason
= 0x05;
2577 result
= L2CAP_CR_SEC_BLOCK
;
2581 result
= L2CAP_CR_NO_MEM
;
2583 /* Check for backlog size */
2584 if (sk_acceptq_is_full(parent
)) {
2585 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2589 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2593 write_lock_bh(&list
->lock
);
2595 /* Check if we already have channel with that dcid */
2596 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2597 write_unlock_bh(&list
->lock
);
2598 sock_set_flag(sk
, SOCK_ZAPPED
);
2599 l2cap_sock_kill(sk
);
2603 hci_conn_hold(conn
->hcon
);
2605 l2cap_sock_init(sk
, parent
);
2606 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2607 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2608 l2cap_pi(sk
)->psm
= psm
;
2609 l2cap_pi(sk
)->dcid
= scid
;
2611 __l2cap_chan_add(conn
, sk
, parent
);
2612 dcid
= l2cap_pi(sk
)->scid
;
2614 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2616 l2cap_pi(sk
)->ident
= cmd
->ident
;
2618 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2619 if (l2cap_check_security(sk
)) {
2620 if (bt_sk(sk
)->defer_setup
) {
2621 sk
->sk_state
= BT_CONNECT2
;
2622 result
= L2CAP_CR_PEND
;
2623 status
= L2CAP_CS_AUTHOR_PEND
;
2624 parent
->sk_data_ready(parent
, 0);
2626 sk
->sk_state
= BT_CONFIG
;
2627 result
= L2CAP_CR_SUCCESS
;
2628 status
= L2CAP_CS_NO_INFO
;
2631 sk
->sk_state
= BT_CONNECT2
;
2632 result
= L2CAP_CR_PEND
;
2633 status
= L2CAP_CS_AUTHEN_PEND
;
2636 sk
->sk_state
= BT_CONNECT2
;
2637 result
= L2CAP_CR_PEND
;
2638 status
= L2CAP_CS_NO_INFO
;
2641 write_unlock_bh(&list
->lock
);
2644 bh_unlock_sock(parent
);
2647 rsp
.scid
= cpu_to_le16(scid
);
2648 rsp
.dcid
= cpu_to_le16(dcid
);
2649 rsp
.result
= cpu_to_le16(result
);
2650 rsp
.status
= cpu_to_le16(status
);
2651 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2653 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2654 struct l2cap_info_req info
;
2655 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2657 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2658 conn
->info_ident
= l2cap_get_ident(conn
);
2660 mod_timer(&conn
->info_timer
, jiffies
+
2661 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2663 l2cap_send_cmd(conn
, conn
->info_ident
,
2664 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2670 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2672 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2673 u16 scid
, dcid
, result
, status
;
2677 scid
= __le16_to_cpu(rsp
->scid
);
2678 dcid
= __le16_to_cpu(rsp
->dcid
);
2679 result
= __le16_to_cpu(rsp
->result
);
2680 status
= __le16_to_cpu(rsp
->status
);
2682 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2685 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2689 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2695 case L2CAP_CR_SUCCESS
:
2696 sk
->sk_state
= BT_CONFIG
;
2697 l2cap_pi(sk
)->ident
= 0;
2698 l2cap_pi(sk
)->dcid
= dcid
;
2699 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2701 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2703 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2704 l2cap_build_conf_req(sk
, req
), req
);
2705 l2cap_pi(sk
)->num_conf_req
++;
2709 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2713 l2cap_chan_del(sk
, ECONNREFUSED
);
2721 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2723 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2729 dcid
= __le16_to_cpu(req
->dcid
);
2730 flags
= __le16_to_cpu(req
->flags
);
2732 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2734 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2738 if (sk
->sk_state
== BT_DISCONN
)
2741 /* Reject if config buffer is too small. */
2742 len
= cmd_len
- sizeof(*req
);
2743 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2744 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2745 l2cap_build_conf_rsp(sk
, rsp
,
2746 L2CAP_CONF_REJECT
, flags
), rsp
);
2751 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2752 l2cap_pi(sk
)->conf_len
+= len
;
2754 if (flags
& 0x0001) {
2755 /* Incomplete config. Send empty response. */
2756 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2757 l2cap_build_conf_rsp(sk
, rsp
,
2758 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2762 /* Complete config. */
2763 len
= l2cap_parse_conf_req(sk
, rsp
);
2765 l2cap_send_disconn_req(conn
, sk
);
2769 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2770 l2cap_pi(sk
)->num_conf_rsp
++;
2772 /* Reset config buffer. */
2773 l2cap_pi(sk
)->conf_len
= 0;
2775 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2778 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2779 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
2780 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2781 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2783 sk
->sk_state
= BT_CONNECTED
;
2785 l2cap_pi(sk
)->next_tx_seq
= 0;
2786 l2cap_pi(sk
)->expected_tx_seq
= 0;
2787 __skb_queue_head_init(TX_QUEUE(sk
));
2788 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2789 l2cap_ertm_init(sk
);
2791 l2cap_chan_ready(sk
);
2795 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2797 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2798 l2cap_build_conf_req(sk
, buf
), buf
);
2799 l2cap_pi(sk
)->num_conf_req
++;
2807 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2809 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2810 u16 scid
, flags
, result
;
2813 scid
= __le16_to_cpu(rsp
->scid
);
2814 flags
= __le16_to_cpu(rsp
->flags
);
2815 result
= __le16_to_cpu(rsp
->result
);
2817 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2818 scid
, flags
, result
);
2820 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2825 case L2CAP_CONF_SUCCESS
:
2828 case L2CAP_CONF_UNACCEPT
:
2829 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2830 int len
= cmd
->len
- sizeof(*rsp
);
2833 /* throw out any old stored conf requests */
2834 result
= L2CAP_CONF_SUCCESS
;
2835 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2838 l2cap_send_disconn_req(conn
, sk
);
2842 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2843 L2CAP_CONF_REQ
, len
, req
);
2844 l2cap_pi(sk
)->num_conf_req
++;
2845 if (result
!= L2CAP_CONF_SUCCESS
)
2851 sk
->sk_state
= BT_DISCONN
;
2852 sk
->sk_err
= ECONNRESET
;
2853 l2cap_sock_set_timer(sk
, HZ
* 5);
2854 l2cap_send_disconn_req(conn
, sk
);
2861 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2863 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2864 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
2865 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2866 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2868 sk
->sk_state
= BT_CONNECTED
;
2869 l2cap_pi(sk
)->next_tx_seq
= 0;
2870 l2cap_pi(sk
)->expected_tx_seq
= 0;
2871 __skb_queue_head_init(TX_QUEUE(sk
));
2872 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2873 l2cap_ertm_init(sk
);
2875 l2cap_chan_ready(sk
);
2883 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2885 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2886 struct l2cap_disconn_rsp rsp
;
2890 scid
= __le16_to_cpu(req
->scid
);
2891 dcid
= __le16_to_cpu(req
->dcid
);
2893 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2895 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2899 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
2900 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2901 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2903 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2905 skb_queue_purge(TX_QUEUE(sk
));
2907 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
2908 skb_queue_purge(SREJ_QUEUE(sk
));
2909 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2910 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2913 l2cap_chan_del(sk
, ECONNRESET
);
2916 l2cap_sock_kill(sk
);
2920 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2922 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2926 scid
= __le16_to_cpu(rsp
->scid
);
2927 dcid
= __le16_to_cpu(rsp
->dcid
);
2929 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2931 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2935 skb_queue_purge(TX_QUEUE(sk
));
2937 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
2938 skb_queue_purge(SREJ_QUEUE(sk
));
2939 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2940 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2943 l2cap_chan_del(sk
, 0);
2946 l2cap_sock_kill(sk
);
2950 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2952 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2955 type
= __le16_to_cpu(req
->type
);
2957 BT_DBG("type 0x%4.4x", type
);
2959 if (type
== L2CAP_IT_FEAT_MASK
) {
2961 u32 feat_mask
= l2cap_feat_mask
;
2962 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2963 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2964 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2966 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2968 put_unaligned_le32(feat_mask
, rsp
->data
);
2969 l2cap_send_cmd(conn
, cmd
->ident
,
2970 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2971 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2973 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2974 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2975 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2976 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2977 l2cap_send_cmd(conn
, cmd
->ident
,
2978 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2980 struct l2cap_info_rsp rsp
;
2981 rsp
.type
= cpu_to_le16(type
);
2982 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2983 l2cap_send_cmd(conn
, cmd
->ident
,
2984 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2990 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2992 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2995 type
= __le16_to_cpu(rsp
->type
);
2996 result
= __le16_to_cpu(rsp
->result
);
2998 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3000 del_timer(&conn
->info_timer
);
3002 if (type
== L2CAP_IT_FEAT_MASK
) {
3003 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3005 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3006 struct l2cap_info_req req
;
3007 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3009 conn
->info_ident
= l2cap_get_ident(conn
);
3011 l2cap_send_cmd(conn
, conn
->info_ident
,
3012 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3014 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3015 conn
->info_ident
= 0;
3017 l2cap_conn_start(conn
);
3019 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3020 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3021 conn
->info_ident
= 0;
3023 l2cap_conn_start(conn
);
3029 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3031 u8
*data
= skb
->data
;
3033 struct l2cap_cmd_hdr cmd
;
3036 l2cap_raw_recv(conn
, skb
);
3038 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3040 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3041 data
+= L2CAP_CMD_HDR_SIZE
;
3042 len
-= L2CAP_CMD_HDR_SIZE
;
3044 cmd_len
= le16_to_cpu(cmd
.len
);
3046 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3048 if (cmd_len
> len
|| !cmd
.ident
) {
3049 BT_DBG("corrupted command");
3054 case L2CAP_COMMAND_REJ
:
3055 l2cap_command_rej(conn
, &cmd
, data
);
3058 case L2CAP_CONN_REQ
:
3059 err
= l2cap_connect_req(conn
, &cmd
, data
);
3062 case L2CAP_CONN_RSP
:
3063 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3066 case L2CAP_CONF_REQ
:
3067 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3070 case L2CAP_CONF_RSP
:
3071 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3074 case L2CAP_DISCONN_REQ
:
3075 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3078 case L2CAP_DISCONN_RSP
:
3079 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3082 case L2CAP_ECHO_REQ
:
3083 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3086 case L2CAP_ECHO_RSP
:
3089 case L2CAP_INFO_REQ
:
3090 err
= l2cap_information_req(conn
, &cmd
, data
);
3093 case L2CAP_INFO_RSP
:
3094 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3098 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3104 struct l2cap_cmd_rej rej
;
3105 BT_DBG("error %d", err
);
3107 /* FIXME: Map err to a valid reason */
3108 rej
.reason
= cpu_to_le16(0);
3109 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3119 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3121 u16 our_fcs
, rcv_fcs
;
3122 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3124 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3125 skb_trim(skb
, skb
->len
- 2);
3126 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3127 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3129 if (our_fcs
!= rcv_fcs
)
3135 static void l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3137 struct sk_buff
*next_skb
;
3139 bt_cb(skb
)->tx_seq
= tx_seq
;
3140 bt_cb(skb
)->sar
= sar
;
3142 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3144 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3149 if (bt_cb(next_skb
)->tx_seq
> tx_seq
) {
3150 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3154 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3157 } while((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3159 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3162 static int l2cap_sar_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3164 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3165 struct sk_buff
*_skb
;
3168 switch (control
& L2CAP_CTRL_SAR
) {
3169 case L2CAP_SDU_UNSEGMENTED
:
3170 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3175 err
= sock_queue_rcv_skb(sk
, skb
);
3181 case L2CAP_SDU_START
:
3182 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3187 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3190 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3196 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3198 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3199 pi
->partial_sdu_len
= skb
->len
;
3203 case L2CAP_SDU_CONTINUE
:
3204 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3207 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3209 pi
->partial_sdu_len
+= skb
->len
;
3210 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3218 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3221 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3223 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3224 pi
->partial_sdu_len
+= skb
->len
;
3226 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3227 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3228 err
= sock_queue_rcv_skb(sk
, _skb
);
3242 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3244 struct sk_buff
*skb
;
3247 while((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3248 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3251 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3252 control
|= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3253 l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3254 l2cap_pi(sk
)->buffer_seq_srej
=
3255 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3260 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3262 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3263 struct srej_list
*l
, *tmp
;
3266 list_for_each_entry_safe(l
,tmp
, SREJ_LIST(sk
), list
) {
3267 if (l
->tx_seq
== tx_seq
) {
3272 control
= L2CAP_SUPER_SELECT_REJECT
;
3273 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3274 l2cap_send_sframe(pi
, control
);
3276 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3280 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3282 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3283 struct srej_list
*new;
3286 while (tx_seq
!= pi
->expected_tx_seq
) {
3287 control
= L2CAP_SUPER_SELECT_REJECT
;
3288 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3289 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
3290 control
|= L2CAP_CTRL_POLL
;
3291 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
3293 l2cap_send_sframe(pi
, control
);
3295 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3296 new->tx_seq
= pi
->expected_tx_seq
++;
3297 list_add_tail(&new->list
, SREJ_LIST(sk
));
3299 pi
->expected_tx_seq
++;
3302 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3304 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3305 u8 tx_seq
= __get_txseq(rx_control
);
3306 u8 req_seq
= __get_reqseq(rx_control
);
3308 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3311 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3313 pi
->expected_ack_seq
= req_seq
;
3314 l2cap_drop_acked_frames(sk
);
3316 if (tx_seq
== pi
->expected_tx_seq
)
3319 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3320 struct srej_list
*first
;
3322 first
= list_first_entry(SREJ_LIST(sk
),
3323 struct srej_list
, list
);
3324 if (tx_seq
== first
->tx_seq
) {
3325 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3326 l2cap_check_srej_gap(sk
, tx_seq
);
3328 list_del(&first
->list
);
3331 if (list_empty(SREJ_LIST(sk
))) {
3332 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3333 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3336 struct srej_list
*l
;
3337 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3339 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3340 if (l
->tx_seq
== tx_seq
) {
3341 l2cap_resend_srejframe(sk
, tx_seq
);
3345 l2cap_send_srejframe(sk
, tx_seq
);
3348 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3350 INIT_LIST_HEAD(SREJ_LIST(sk
));
3351 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3353 __skb_queue_head_init(SREJ_QUEUE(sk
));
3354 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3356 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3358 l2cap_send_srejframe(sk
, tx_seq
);
3363 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3365 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3366 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3370 if (rx_control
& L2CAP_CTRL_FINAL
) {
3371 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3372 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3374 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3375 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3376 l2cap_ertm_send(sk
);
3380 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3382 err
= l2cap_sar_reassembly_sdu(sk
, skb
, rx_control
);
3386 pi
->num_to_ack
= (pi
->num_to_ack
+ 1) % L2CAP_DEFAULT_NUM_TO_ACK
;
3387 if (pi
->num_to_ack
== L2CAP_DEFAULT_NUM_TO_ACK
- 1) {
3388 tx_control
|= L2CAP_SUPER_RCV_READY
;
3389 tx_control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3390 l2cap_send_sframe(pi
, tx_control
);
3395 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3397 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3398 u8 tx_seq
= __get_reqseq(rx_control
);
3400 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3402 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3403 case L2CAP_SUPER_RCV_READY
:
3404 if (rx_control
& L2CAP_CTRL_POLL
) {
3405 u16 control
= L2CAP_CTRL_FINAL
;
3406 control
|= L2CAP_SUPER_RCV_READY
|
3407 (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
);
3408 l2cap_send_sframe(l2cap_pi(sk
), control
);
3409 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3411 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3412 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3413 pi
->expected_ack_seq
= tx_seq
;
3414 l2cap_drop_acked_frames(sk
);
3416 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3417 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3419 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3420 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3421 l2cap_ertm_send(sk
);
3424 if (!(pi
->conn_state
& L2CAP_CONN_WAIT_F
))
3427 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3428 del_timer(&pi
->monitor_timer
);
3430 if (pi
->unacked_frames
> 0)
3431 __mod_retrans_timer();
3433 pi
->expected_ack_seq
= tx_seq
;
3434 l2cap_drop_acked_frames(sk
);
3436 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3437 (pi
->unacked_frames
> 0))
3438 __mod_retrans_timer();
3440 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3441 l2cap_ertm_send(sk
);
3445 case L2CAP_SUPER_REJECT
:
3446 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3448 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3449 l2cap_drop_acked_frames(sk
);
3451 if (rx_control
& L2CAP_CTRL_FINAL
) {
3452 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3453 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3455 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3456 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3457 l2cap_ertm_send(sk
);
3460 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3461 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3462 l2cap_ertm_send(sk
);
3464 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3465 pi
->srej_save_reqseq
= tx_seq
;
3466 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3472 case L2CAP_SUPER_SELECT_REJECT
:
3473 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3475 if (rx_control
& L2CAP_CTRL_POLL
) {
3476 pi
->expected_ack_seq
= tx_seq
;
3477 l2cap_drop_acked_frames(sk
);
3478 l2cap_retransmit_frame(sk
, tx_seq
);
3479 l2cap_ertm_send(sk
);
3480 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3481 pi
->srej_save_reqseq
= tx_seq
;
3482 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3484 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3485 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3486 pi
->srej_save_reqseq
== tx_seq
)
3487 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3489 l2cap_retransmit_frame(sk
, tx_seq
);
3492 l2cap_retransmit_frame(sk
, tx_seq
);
3493 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3494 pi
->srej_save_reqseq
= tx_seq
;
3495 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3500 case L2CAP_SUPER_RCV_NOT_READY
:
3501 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3502 pi
->expected_ack_seq
= tx_seq
;
3503 l2cap_drop_acked_frames(sk
);
3505 del_timer(&l2cap_pi(sk
)->retrans_timer
);
3506 if (rx_control
& L2CAP_CTRL_POLL
) {
3507 u16 control
= L2CAP_CTRL_FINAL
;
3508 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
3516 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3519 struct l2cap_pinfo
*pi
;
3523 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
3525 BT_DBG("unknown cid 0x%4.4x", cid
);
3531 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3533 if (sk
->sk_state
!= BT_CONNECTED
)
3537 case L2CAP_MODE_BASIC
:
3538 /* If socket recv buffers overflows we drop data here
3539 * which is *bad* because L2CAP has to be reliable.
3540 * But we don't have any other choice. L2CAP doesn't
3541 * provide flow control mechanism. */
3543 if (pi
->imtu
< skb
->len
)
3546 if (!sock_queue_rcv_skb(sk
, skb
))
3550 case L2CAP_MODE_ERTM
:
3551 control
= get_unaligned_le16(skb
->data
);
3555 if (__is_sar_start(control
))
3558 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3562 * We can just drop the corrupted I-frame here.
3563 * Receiver will miss it and start proper recovery
3564 * procedures and ask retransmission.
3566 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
)
3569 if (l2cap_check_fcs(pi
, skb
))
3572 if (__is_iframe(control
))
3573 l2cap_data_channel_iframe(sk
, control
, skb
);
3575 l2cap_data_channel_sframe(sk
, control
, skb
);
3579 case L2CAP_MODE_STREAMING
:
3580 control
= get_unaligned_le16(skb
->data
);
3584 if (__is_sar_start(control
))
3587 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3590 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
|| __is_sframe(control
))
3593 if (l2cap_check_fcs(pi
, skb
))
3596 tx_seq
= __get_txseq(control
);
3598 if (pi
->expected_tx_seq
== tx_seq
)
3599 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3601 pi
->expected_tx_seq
= tx_seq
+ 1;
3603 l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3608 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, l2cap_pi(sk
)->mode
);
3622 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3626 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3630 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3632 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3635 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3638 if (!sock_queue_rcv_skb(sk
, skb
))
3650 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3652 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3656 skb_pull(skb
, L2CAP_HDR_SIZE
);
3657 cid
= __le16_to_cpu(lh
->cid
);
3658 len
= __le16_to_cpu(lh
->len
);
3660 if (len
!= skb
->len
) {
3665 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3668 case L2CAP_CID_SIGNALING
:
3669 l2cap_sig_channel(conn
, skb
);
3672 case L2CAP_CID_CONN_LESS
:
3673 psm
= get_unaligned_le16(skb
->data
);
3675 l2cap_conless_channel(conn
, psm
, skb
);
3679 l2cap_data_channel(conn
, cid
, skb
);
3684 /* ---- L2CAP interface with lower layer (HCI) ---- */
3686 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3688 int exact
= 0, lm1
= 0, lm2
= 0;
3689 register struct sock
*sk
;
3690 struct hlist_node
*node
;
3692 if (type
!= ACL_LINK
)
3695 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3697 /* Find listening sockets and check their link_mode */
3698 read_lock(&l2cap_sk_list
.lock
);
3699 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3700 if (sk
->sk_state
!= BT_LISTEN
)
3703 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3704 lm1
|= HCI_LM_ACCEPT
;
3705 if (l2cap_pi(sk
)->role_switch
)
3706 lm1
|= HCI_LM_MASTER
;
3708 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3709 lm2
|= HCI_LM_ACCEPT
;
3710 if (l2cap_pi(sk
)->role_switch
)
3711 lm2
|= HCI_LM_MASTER
;
3714 read_unlock(&l2cap_sk_list
.lock
);
3716 return exact
? lm1
: lm2
;
3719 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3721 struct l2cap_conn
*conn
;
3723 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3725 if (hcon
->type
!= ACL_LINK
)
3729 conn
= l2cap_conn_add(hcon
, status
);
3731 l2cap_conn_ready(conn
);
3733 l2cap_conn_del(hcon
, bt_err(status
));
3738 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3740 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3742 BT_DBG("hcon %p", hcon
);
3744 if (hcon
->type
!= ACL_LINK
|| !conn
)
3747 return conn
->disc_reason
;
3750 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3752 BT_DBG("hcon %p reason %d", hcon
, reason
);
3754 if (hcon
->type
!= ACL_LINK
)
3757 l2cap_conn_del(hcon
, bt_err(reason
));
3762 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
3764 if (sk
->sk_type
!= SOCK_SEQPACKET
)
3767 if (encrypt
== 0x00) {
3768 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
3769 l2cap_sock_clear_timer(sk
);
3770 l2cap_sock_set_timer(sk
, HZ
* 5);
3771 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
3772 __l2cap_sock_close(sk
, ECONNREFUSED
);
3774 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
3775 l2cap_sock_clear_timer(sk
);
3779 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3781 struct l2cap_chan_list
*l
;
3782 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3788 l
= &conn
->chan_list
;
3790 BT_DBG("conn %p", conn
);
3792 read_lock(&l
->lock
);
3794 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
3797 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3802 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3803 sk
->sk_state
== BT_CONFIG
)) {
3804 l2cap_check_encryption(sk
, encrypt
);
3809 if (sk
->sk_state
== BT_CONNECT
) {
3811 struct l2cap_conn_req req
;
3812 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3813 req
.psm
= l2cap_pi(sk
)->psm
;
3815 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
3817 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3818 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3820 l2cap_sock_clear_timer(sk
);
3821 l2cap_sock_set_timer(sk
, HZ
/ 10);
3823 } else if (sk
->sk_state
== BT_CONNECT2
) {
3824 struct l2cap_conn_rsp rsp
;
3828 sk
->sk_state
= BT_CONFIG
;
3829 result
= L2CAP_CR_SUCCESS
;
3831 sk
->sk_state
= BT_DISCONN
;
3832 l2cap_sock_set_timer(sk
, HZ
/ 10);
3833 result
= L2CAP_CR_SEC_BLOCK
;
3836 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3837 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3838 rsp
.result
= cpu_to_le16(result
);
3839 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3840 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3841 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3847 read_unlock(&l
->lock
);
3852 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3854 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3856 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
3859 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3861 if (flags
& ACL_START
) {
3862 struct l2cap_hdr
*hdr
;
3866 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3867 kfree_skb(conn
->rx_skb
);
3868 conn
->rx_skb
= NULL
;
3870 l2cap_conn_unreliable(conn
, ECOMM
);
3874 BT_ERR("Frame is too short (len %d)", skb
->len
);
3875 l2cap_conn_unreliable(conn
, ECOMM
);
3879 hdr
= (struct l2cap_hdr
*) skb
->data
;
3880 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
3882 if (len
== skb
->len
) {
3883 /* Complete frame received */
3884 l2cap_recv_frame(conn
, skb
);
3888 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
3890 if (skb
->len
> len
) {
3891 BT_ERR("Frame is too long (len %d, expected len %d)",
3893 l2cap_conn_unreliable(conn
, ECOMM
);
3897 /* Allocate skb for the complete frame (with header) */
3898 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3902 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3904 conn
->rx_len
= len
- skb
->len
;
3906 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
3908 if (!conn
->rx_len
) {
3909 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
3910 l2cap_conn_unreliable(conn
, ECOMM
);
3914 if (skb
->len
> conn
->rx_len
) {
3915 BT_ERR("Fragment is too long (len %d, expected %d)",
3916 skb
->len
, conn
->rx_len
);
3917 kfree_skb(conn
->rx_skb
);
3918 conn
->rx_skb
= NULL
;
3920 l2cap_conn_unreliable(conn
, ECOMM
);
3924 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3926 conn
->rx_len
-= skb
->len
;
3928 if (!conn
->rx_len
) {
3929 /* Complete frame received */
3930 l2cap_recv_frame(conn
, conn
->rx_skb
);
3931 conn
->rx_skb
= NULL
;
3940 static ssize_t
l2cap_sysfs_show(struct class *dev
, char *buf
)
3943 struct hlist_node
*node
;
3946 read_lock_bh(&l2cap_sk_list
.lock
);
3948 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3949 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3951 str
+= sprintf(str
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3952 batostr(&bt_sk(sk
)->src
), batostr(&bt_sk(sk
)->dst
),
3953 sk
->sk_state
, __le16_to_cpu(pi
->psm
), pi
->scid
,
3954 pi
->dcid
, pi
->imtu
, pi
->omtu
, pi
->sec_level
);
3957 read_unlock_bh(&l2cap_sk_list
.lock
);
/* Read-only sysfs class attribute exposing the socket list above. */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
3964 static const struct proto_ops l2cap_sock_ops
= {
3965 .family
= PF_BLUETOOTH
,
3966 .owner
= THIS_MODULE
,
3967 .release
= l2cap_sock_release
,
3968 .bind
= l2cap_sock_bind
,
3969 .connect
= l2cap_sock_connect
,
3970 .listen
= l2cap_sock_listen
,
3971 .accept
= l2cap_sock_accept
,
3972 .getname
= l2cap_sock_getname
,
3973 .sendmsg
= l2cap_sock_sendmsg
,
3974 .recvmsg
= l2cap_sock_recvmsg
,
3975 .poll
= bt_sock_poll
,
3976 .ioctl
= bt_sock_ioctl
,
3977 .mmap
= sock_no_mmap
,
3978 .socketpair
= sock_no_socketpair
,
3979 .shutdown
= l2cap_sock_shutdown
,
3980 .setsockopt
= l2cap_sock_setsockopt
,
3981 .getsockopt
= l2cap_sock_getsockopt
3984 static const struct net_proto_family l2cap_sock_family_ops
= {
3985 .family
= PF_BLUETOOTH
,
3986 .owner
= THIS_MODULE
,
3987 .create
= l2cap_sock_create
,
3990 static struct hci_proto l2cap_hci_proto
= {
3992 .id
= HCI_PROTO_L2CAP
,
3993 .connect_ind
= l2cap_connect_ind
,
3994 .connect_cfm
= l2cap_connect_cfm
,
3995 .disconn_ind
= l2cap_disconn_ind
,
3996 .disconn_cfm
= l2cap_disconn_cfm
,
3997 .security_cfm
= l2cap_security_cfm
,
3998 .recv_acldata
= l2cap_recv_acldata
4001 static int __init
l2cap_init(void)
4005 err
= proto_register(&l2cap_proto
, 0);
4009 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4011 BT_ERR("L2CAP socket registration failed");
4015 err
= hci_register_proto(&l2cap_hci_proto
);
4017 BT_ERR("L2CAP protocol registration failed");
4018 bt_sock_unregister(BTPROTO_L2CAP
);
4022 if (class_create_file(bt_class
, &class_attr_l2cap
) < 0)
4023 BT_ERR("Failed to create L2CAP info file");
4025 BT_INFO("L2CAP ver %s", VERSION
);
4026 BT_INFO("L2CAP socket layer initialized");
4031 proto_unregister(&l2cap_proto
);
4035 static void __exit
l2cap_exit(void)
4037 class_remove_file(bt_class
, &class_attr_l2cap
);
4039 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4040 BT_ERR("L2CAP socket unregistration failed");
4042 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4043 BT_ERR("L2CAP protocol unregistration failed");
4045 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4057 module_init(l2cap_init
);
4058 module_exit(l2cap_exit
);
4060 module_param(enable_ertm
, bool, 0644);
4061 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4063 module_param(max_transmit
, uint
, 0644);
4064 MODULE_PARM_DESC(max_transmit
, "Max transmit value (default = 3)");
4066 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4067 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4068 MODULE_VERSION(VERSION
);
4069 MODULE_LICENSE("GPL");
4070 MODULE_ALIAS("bt-proto-0");