2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm
= 0;
59 static int max_transmit
= L2CAP_DEFAULT_MAX_TX
;
61 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
62 static u8 l2cap_fixed_chan
[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops
;
66 static struct bt_sock_list l2cap_sk_list
= {
67 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
70 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
71 static void l2cap_sock_close(struct sock
*sk
);
72 static void l2cap_sock_kill(struct sock
*sk
);
74 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
75 u8 code
, u8 ident
, u16 dlen
, void *data
);
77 /* ---- L2CAP timers ---- */
/* Socket timer callback: chooses an errno from the socket state and
 * closes the channel.
 * NOTE(review): extraction gap — the original's `int reason` declaration
 * (defaulting to ETIMEDOUT), the bh_lock_sock()/bh_unlock_sock() pair and
 * the trailing l2cap_sock_kill()/sock_put() appear to be missing from this
 * fragment; confirm against the original source. */
78 static void l2cap_sock_timeout(unsigned long arg
)
/* Recover the socket pointer passed through the timer's unsigned long arg. */
80 struct sock
*sk
= (struct sock
*) arg
;
83 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
/* Established or configuring channel timing out: refuse the connection. */
87 if (sk
->sk_state
== BT_CONNECTED
|| sk
->sk_state
== BT_CONFIG
)
88 reason
= ECONNREFUSED
;
/* Outgoing connect above the SDP security level: also refused. */
89 else if (sk
->sk_state
== BT_CONNECT
&&
90 l2cap_pi(sk
)->sec_level
!= BT_SECURITY_SDP
)
91 reason
= ECONNREFUSED
;
/* Tear the channel down with the chosen reason. */
95 __l2cap_sock_close(sk
, reason
);
103 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
105 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
106 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
109 static void l2cap_sock_clear_timer(struct sock
*sk
)
111 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
112 sk_stop_timer(sk
, &sk
->sk_timer
);
115 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list for the channel whose
 * destination CID matches `cid`; presumably called with the list lock
 * held by the caller — confirm against the locked wrappers below.
 * NOTE(review): extraction gap — the `struct sock *s;` declaration and the
 * trailing `break; } return s;` lines appear to be missing here. */
116 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
119 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
120 if (l2cap_pi(s
)->dcid
== cid
)
/* Linear scan of the connection's channel list for the channel whose
 * source CID matches `cid`; presumably called with the list lock held.
 * NOTE(review): extraction gap — the `struct sock *s;` declaration and the
 * trailing `break; } return s;` lines appear to be missing here. */
126 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
129 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
130 if (l2cap_pi(s
)->scid
== cid
)
136 /* Find channel with given SCID.
137 * Returns locked socket */
/* Locked wrapper: look up a channel by SCID under the list read lock;
 * per the comment above, returns a locked socket.
 * NOTE(review): extraction gap — the matching read_lock(&l->lock), the
 * bh_lock_sock() on a hit, and the `return s;` appear to be missing. */
138 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
142 s
= __l2cap_get_chan_by_scid(l
, cid
);
145 read_unlock(&l
->lock
);
/* Linear scan of the connection's channel list for the channel whose
 * pending signalling identifier matches `ident`; presumably called with
 * the list lock held.
 * NOTE(review): extraction gap — the `struct sock *s;` declaration and the
 * trailing `break; } return s;` lines appear to be missing here. */
149 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
152 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
153 if (l2cap_pi(s
)->ident
== ident
)
/* Locked wrapper: look up a channel by signalling ident under the list
 * read lock.
 * NOTE(review): extraction gap — the matching read_lock(&l->lock), the
 * bh_lock_sock() on a hit, and the `return s;` appear to be missing. */
159 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
163 s
= __l2cap_get_chan_by_ident(l
, ident
);
166 read_unlock(&l
->lock
);
/* Allocate the first free dynamic channel ID by probing the range
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END) against the channel list.
 * NOTE(review): extraction gap — the `return cid;` on a free slot and the
 * fall-through failure return appear to be missing from this fragment. */
170 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
172 u16 cid
= L2CAP_CID_DYN_START
;
174 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
175 if (!__l2cap_get_chan_by_scid(l
, cid
))
182 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
187 l2cap_pi(l
->head
)->prev_c
= sk
;
189 l2cap_pi(sk
)->next_c
= l
->head
;
190 l2cap_pi(sk
)->prev_c
= NULL
;
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
196 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
198 write_lock_bh(&l
->lock
);
203 l2cap_pi(next
)->prev_c
= prev
;
205 l2cap_pi(prev
)->next_c
= next
;
206 write_unlock_bh(&l
->lock
);
211 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
213 struct l2cap_chan_list
*l
= &conn
->chan_list
;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
216 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
218 conn
->disc_reason
= 0x13;
220 l2cap_pi(sk
)->conn
= conn
;
222 if (sk
->sk_type
== SOCK_SEQPACKET
) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
225 } else if (sk
->sk_type
== SOCK_DGRAM
) {
226 /* Connectionless socket */
227 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
228 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
229 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
233 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
234 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
237 __l2cap_chan_link(l
, sk
);
240 bt_accept_enqueue(parent
, sk
);
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock
*sk
, int err
)
247 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
248 struct sock
*parent
= bt_sk(sk
)->parent
;
250 l2cap_sock_clear_timer(sk
);
252 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn
->chan_list
, sk
);
257 l2cap_pi(sk
)->conn
= NULL
;
258 hci_conn_put(conn
->hcon
);
261 sk
->sk_state
= BT_CLOSED
;
262 sock_set_flag(sk
, SOCK_ZAPPED
);
268 bt_accept_unlink(sk
);
269 parent
->sk_data_ready(parent
, 0);
271 sk
->sk_state_change(sk
);
274 /* Service level security */
275 static inline int l2cap_check_security(struct sock
*sk
)
277 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
280 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
282 auth_type
= HCI_AT_NO_BONDING_MITM
;
284 auth_type
= HCI_AT_NO_BONDING
;
286 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
287 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
289 switch (l2cap_pi(sk
)->sec_level
) {
290 case BT_SECURITY_HIGH
:
291 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
293 case BT_SECURITY_MEDIUM
:
294 auth_type
= HCI_AT_GENERAL_BONDING
;
297 auth_type
= HCI_AT_NO_BONDING
;
302 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
306 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
310 /* Get next available identificator.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn
->lock
);
318 if (++conn
->tx_ident
> 128)
323 spin_unlock_bh(&conn
->lock
);
328 static inline int l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
330 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
332 BT_DBG("code 0x%2.2x", code
);
337 return hci_send_acl(conn
->hcon
, skb
, 0);
340 static inline int l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
343 struct l2cap_hdr
*lh
;
344 struct l2cap_conn
*conn
= pi
->conn
;
345 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
347 if (pi
->fcs
== L2CAP_FCS_CRC16
)
350 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
352 count
= min_t(unsigned int, conn
->mtu
, hlen
);
353 control
|= L2CAP_CTRL_FRAME_TYPE
;
355 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
359 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
360 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
361 lh
->cid
= cpu_to_le16(pi
->dcid
);
362 put_unaligned_le16(control
, skb_put(skb
, 2));
364 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
365 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
366 put_unaligned_le16(fcs
, skb_put(skb
, 2));
369 return hci_send_acl(pi
->conn
->hcon
, skb
, 0);
372 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
374 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
)
375 control
|= L2CAP_SUPER_RCV_NOT_READY
;
377 control
|= L2CAP_SUPER_RCV_READY
;
379 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
381 return l2cap_send_sframe(pi
, control
);
384 static void l2cap_do_start(struct sock
*sk
)
386 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
388 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
389 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
392 if (l2cap_check_security(sk
)) {
393 struct l2cap_conn_req req
;
394 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
395 req
.psm
= l2cap_pi(sk
)->psm
;
397 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
399 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
400 L2CAP_CONN_REQ
, sizeof(req
), &req
);
403 struct l2cap_info_req req
;
404 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
406 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
407 conn
->info_ident
= l2cap_get_ident(conn
);
409 mod_timer(&conn
->info_timer
, jiffies
+
410 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
412 l2cap_send_cmd(conn
, conn
->info_ident
,
413 L2CAP_INFO_REQ
, sizeof(req
), &req
);
417 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
)
419 struct l2cap_disconn_req req
;
421 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
422 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
423 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
424 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
427 /* ---- L2CAP connections ---- */
428 static void l2cap_conn_start(struct l2cap_conn
*conn
)
430 struct l2cap_chan_list
*l
= &conn
->chan_list
;
433 BT_DBG("conn %p", conn
);
437 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
440 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
445 if (sk
->sk_state
== BT_CONNECT
) {
446 if (l2cap_check_security(sk
)) {
447 struct l2cap_conn_req req
;
448 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
449 req
.psm
= l2cap_pi(sk
)->psm
;
451 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
453 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
454 L2CAP_CONN_REQ
, sizeof(req
), &req
);
456 } else if (sk
->sk_state
== BT_CONNECT2
) {
457 struct l2cap_conn_rsp rsp
;
458 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
459 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
461 if (l2cap_check_security(sk
)) {
462 if (bt_sk(sk
)->defer_setup
) {
463 struct sock
*parent
= bt_sk(sk
)->parent
;
464 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
465 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
466 parent
->sk_data_ready(parent
, 0);
469 sk
->sk_state
= BT_CONFIG
;
470 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
471 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
474 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
475 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
478 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
479 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
485 read_unlock(&l
->lock
);
488 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
490 struct l2cap_chan_list
*l
= &conn
->chan_list
;
493 BT_DBG("conn %p", conn
);
497 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
500 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
501 l2cap_sock_clear_timer(sk
);
502 sk
->sk_state
= BT_CONNECTED
;
503 sk
->sk_state_change(sk
);
504 } else if (sk
->sk_state
== BT_CONNECT
)
510 read_unlock(&l
->lock
);
513 /* Notify sockets that we cannot guaranty reliability anymore */
514 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
516 struct l2cap_chan_list
*l
= &conn
->chan_list
;
519 BT_DBG("conn %p", conn
);
523 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
524 if (l2cap_pi(sk
)->force_reliable
)
528 read_unlock(&l
->lock
);
531 static void l2cap_info_timeout(unsigned long arg
)
533 struct l2cap_conn
*conn
= (void *) arg
;
535 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
536 conn
->info_ident
= 0;
538 l2cap_conn_start(conn
);
541 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
543 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
548 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
552 hcon
->l2cap_data
= conn
;
555 BT_DBG("hcon %p conn %p", hcon
, conn
);
557 conn
->mtu
= hcon
->hdev
->acl_mtu
;
558 conn
->src
= &hcon
->hdev
->bdaddr
;
559 conn
->dst
= &hcon
->dst
;
563 spin_lock_init(&conn
->lock
);
564 rwlock_init(&conn
->chan_list
.lock
);
566 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
567 (unsigned long) conn
);
569 conn
->disc_reason
= 0x13;
574 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
576 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
582 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
584 kfree_skb(conn
->rx_skb
);
587 while ((sk
= conn
->chan_list
.head
)) {
589 l2cap_chan_del(sk
, err
);
594 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
595 del_timer_sync(&conn
->info_timer
);
597 hcon
->l2cap_data
= NULL
;
601 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
603 struct l2cap_chan_list
*l
= &conn
->chan_list
;
604 write_lock_bh(&l
->lock
);
605 __l2cap_chan_add(conn
, sk
, parent
);
606 write_unlock_bh(&l
->lock
);
609 /* ---- Socket interface ---- */
610 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
613 struct hlist_node
*node
;
614 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
615 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
622 /* Find socket with psm and source bdaddr.
623 * Returns closest match.
625 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
627 struct sock
*sk
= NULL
, *sk1
= NULL
;
628 struct hlist_node
*node
;
630 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
631 if (state
&& sk
->sk_state
!= state
)
634 if (l2cap_pi(sk
)->psm
== psm
) {
636 if (!bacmp(&bt_sk(sk
)->src
, src
))
640 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
644 return node
? sk
: sk1
;
647 /* Find socket with given address (psm, src).
648 * Returns locked socket */
649 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
652 read_lock(&l2cap_sk_list
.lock
);
653 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
656 read_unlock(&l2cap_sk_list
.lock
);
660 static void l2cap_sock_destruct(struct sock
*sk
)
664 skb_queue_purge(&sk
->sk_receive_queue
);
665 skb_queue_purge(&sk
->sk_write_queue
);
668 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
672 BT_DBG("parent %p", parent
);
674 /* Close not yet accepted channels */
675 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
676 l2cap_sock_close(sk
);
678 parent
->sk_state
= BT_CLOSED
;
679 sock_set_flag(parent
, SOCK_ZAPPED
);
682 /* Kill socket (only if zapped and orphan)
683 * Must be called on unlocked socket.
/* Kill socket (only if zapped and orphan); per the comment above, must be
 * called on an unlocked socket.
 * NOTE(review): extraction gap — the guard's early `return;` and the final
 * sock_put(sk) that drops the last reference appear to be missing from
 * this fragment; confirm against the original source. */
685 static void l2cap_sock_kill(struct sock
*sk
)
/* Bail out unless the socket is zapped and no longer has a struct socket. */
687 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
690 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
692 /* Kill poor orphan */
693 bt_sock_unlink(&l2cap_sk_list
, sk
);
694 sock_set_flag(sk
, SOCK_DEAD
);
698 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
700 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
702 switch (sk
->sk_state
) {
704 l2cap_sock_cleanup_listen(sk
);
709 if (sk
->sk_type
== SOCK_SEQPACKET
) {
710 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
712 sk
->sk_state
= BT_DISCONN
;
713 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
714 l2cap_send_disconn_req(conn
, sk
);
716 l2cap_chan_del(sk
, reason
);
720 if (sk
->sk_type
== SOCK_SEQPACKET
) {
721 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
722 struct l2cap_conn_rsp rsp
;
725 if (bt_sk(sk
)->defer_setup
)
726 result
= L2CAP_CR_SEC_BLOCK
;
728 result
= L2CAP_CR_BAD_PSM
;
730 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
731 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
732 rsp
.result
= cpu_to_le16(result
);
733 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
734 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
735 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
737 l2cap_chan_del(sk
, reason
);
742 l2cap_chan_del(sk
, reason
);
746 sock_set_flag(sk
, SOCK_ZAPPED
);
751 /* Must be called on unlocked socket. */
/* Close the socket: stop its timer and run the state-machine close with
 * ECONNRESET. Per the comment above, must be called on an unlocked socket.
 * NOTE(review): extraction gap — the lock_sock()/release_sock() pair around
 * __l2cap_sock_close() and the trailing l2cap_sock_kill(sk) appear to be
 * missing from this fragment; confirm against the original source. */
752 static void l2cap_sock_close(struct sock
*sk
)
754 l2cap_sock_clear_timer(sk
);
756 __l2cap_sock_close(sk
, ECONNRESET
);
761 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
763 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
768 sk
->sk_type
= parent
->sk_type
;
769 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
771 pi
->imtu
= l2cap_pi(parent
)->imtu
;
772 pi
->omtu
= l2cap_pi(parent
)->omtu
;
773 pi
->mode
= l2cap_pi(parent
)->mode
;
774 pi
->fcs
= l2cap_pi(parent
)->fcs
;
775 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
776 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
777 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
779 pi
->imtu
= L2CAP_DEFAULT_MTU
;
781 pi
->mode
= L2CAP_MODE_BASIC
;
782 pi
->fcs
= L2CAP_FCS_CRC16
;
783 pi
->sec_level
= BT_SECURITY_LOW
;
785 pi
->force_reliable
= 0;
788 /* Default config options */
790 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
791 skb_queue_head_init(TX_QUEUE(sk
));
792 skb_queue_head_init(SREJ_QUEUE(sk
));
793 INIT_LIST_HEAD(SREJ_LIST(sk
));
796 static struct proto l2cap_proto
= {
798 .owner
= THIS_MODULE
,
799 .obj_size
= sizeof(struct l2cap_pinfo
)
802 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
806 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
810 sock_init_data(sock
, sk
);
811 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
813 sk
->sk_destruct
= l2cap_sock_destruct
;
814 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
816 sock_reset_flag(sk
, SOCK_ZAPPED
);
818 sk
->sk_protocol
= proto
;
819 sk
->sk_state
= BT_OPEN
;
821 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
823 bt_sock_link(&l2cap_sk_list
, sk
);
827 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
832 BT_DBG("sock %p", sock
);
834 sock
->state
= SS_UNCONNECTED
;
836 if (sock
->type
!= SOCK_SEQPACKET
&&
837 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
838 return -ESOCKTNOSUPPORT
;
840 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
843 sock
->ops
= &l2cap_sock_ops
;
845 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
849 l2cap_sock_init(sk
, NULL
);
853 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
855 struct sock
*sk
= sock
->sk
;
856 struct sockaddr_l2 la
;
861 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
864 memset(&la
, 0, sizeof(la
));
865 len
= min_t(unsigned int, sizeof(la
), alen
);
866 memcpy(&la
, addr
, len
);
873 if (sk
->sk_state
!= BT_OPEN
) {
878 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
879 !capable(CAP_NET_BIND_SERVICE
)) {
884 write_lock_bh(&l2cap_sk_list
.lock
);
886 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
889 /* Save source address */
890 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
891 l2cap_pi(sk
)->psm
= la
.l2_psm
;
892 l2cap_pi(sk
)->sport
= la
.l2_psm
;
893 sk
->sk_state
= BT_BOUND
;
895 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
896 __le16_to_cpu(la
.l2_psm
) == 0x0003)
897 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
900 write_unlock_bh(&l2cap_sk_list
.lock
);
907 static int l2cap_do_connect(struct sock
*sk
)
909 bdaddr_t
*src
= &bt_sk(sk
)->src
;
910 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
911 struct l2cap_conn
*conn
;
912 struct hci_conn
*hcon
;
913 struct hci_dev
*hdev
;
917 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
920 hdev
= hci_get_route(dst
, src
);
922 return -EHOSTUNREACH
;
924 hci_dev_lock_bh(hdev
);
928 if (sk
->sk_type
== SOCK_RAW
) {
929 switch (l2cap_pi(sk
)->sec_level
) {
930 case BT_SECURITY_HIGH
:
931 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
933 case BT_SECURITY_MEDIUM
:
934 auth_type
= HCI_AT_DEDICATED_BONDING
;
937 auth_type
= HCI_AT_NO_BONDING
;
940 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
941 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
942 auth_type
= HCI_AT_NO_BONDING_MITM
;
944 auth_type
= HCI_AT_NO_BONDING
;
946 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
947 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
949 switch (l2cap_pi(sk
)->sec_level
) {
950 case BT_SECURITY_HIGH
:
951 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
953 case BT_SECURITY_MEDIUM
:
954 auth_type
= HCI_AT_GENERAL_BONDING
;
957 auth_type
= HCI_AT_NO_BONDING
;
962 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
963 l2cap_pi(sk
)->sec_level
, auth_type
);
967 conn
= l2cap_conn_add(hcon
, 0);
975 /* Update source addr of the socket */
976 bacpy(src
, conn
->src
);
978 l2cap_chan_add(conn
, sk
, NULL
);
980 sk
->sk_state
= BT_CONNECT
;
981 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
983 if (hcon
->state
== BT_CONNECTED
) {
984 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
985 l2cap_sock_clear_timer(sk
);
986 sk
->sk_state
= BT_CONNECTED
;
992 hci_dev_unlock_bh(hdev
);
997 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
999 struct sock
*sk
= sock
->sk
;
1000 struct sockaddr_l2 la
;
1003 BT_DBG("sk %p", sk
);
1005 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1006 addr
->sa_family
!= AF_BLUETOOTH
)
1009 memset(&la
, 0, sizeof(la
));
1010 len
= min_t(unsigned int, sizeof(la
), alen
);
1011 memcpy(&la
, addr
, len
);
1018 if (sk
->sk_type
== SOCK_SEQPACKET
&& !la
.l2_psm
) {
1023 switch (l2cap_pi(sk
)->mode
) {
1024 case L2CAP_MODE_BASIC
:
1026 case L2CAP_MODE_ERTM
:
1027 case L2CAP_MODE_STREAMING
:
1036 switch (sk
->sk_state
) {
1040 /* Already connecting */
1044 /* Already connected */
1057 /* Set destination address and psm */
1058 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1059 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1061 err
= l2cap_do_connect(sk
);
1066 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1067 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1073 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1075 struct sock
*sk
= sock
->sk
;
1078 BT_DBG("sk %p backlog %d", sk
, backlog
);
1082 if (sk
->sk_state
!= BT_BOUND
|| sock
->type
!= SOCK_SEQPACKET
) {
1087 switch (l2cap_pi(sk
)->mode
) {
1088 case L2CAP_MODE_BASIC
:
1090 case L2CAP_MODE_ERTM
:
1091 case L2CAP_MODE_STREAMING
:
1100 if (!l2cap_pi(sk
)->psm
) {
1101 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1106 write_lock_bh(&l2cap_sk_list
.lock
);
1108 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1109 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1110 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1111 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1116 write_unlock_bh(&l2cap_sk_list
.lock
);
1122 sk
->sk_max_ack_backlog
= backlog
;
1123 sk
->sk_ack_backlog
= 0;
1124 sk
->sk_state
= BT_LISTEN
;
1131 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1133 DECLARE_WAITQUEUE(wait
, current
);
1134 struct sock
*sk
= sock
->sk
, *nsk
;
1138 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1140 if (sk
->sk_state
!= BT_LISTEN
) {
1145 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1147 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1149 /* Wait for an incoming connection. (wake-one). */
1150 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1151 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1152 set_current_state(TASK_INTERRUPTIBLE
);
1159 timeo
= schedule_timeout(timeo
);
1160 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1162 if (sk
->sk_state
!= BT_LISTEN
) {
1167 if (signal_pending(current
)) {
1168 err
= sock_intr_errno(timeo
);
1172 set_current_state(TASK_RUNNING
);
1173 remove_wait_queue(sk_sleep(sk
), &wait
);
1178 newsock
->state
= SS_CONNECTED
;
1180 BT_DBG("new socket %p", nsk
);
1187 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1189 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1190 struct sock
*sk
= sock
->sk
;
1192 BT_DBG("sock %p, sk %p", sock
, sk
);
1194 addr
->sa_family
= AF_BLUETOOTH
;
1195 *len
= sizeof(struct sockaddr_l2
);
1198 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1199 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1200 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1202 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1203 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1204 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1210 static void l2cap_monitor_timeout(unsigned long arg
)
1212 struct sock
*sk
= (void *) arg
;
1216 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1217 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
);
1222 l2cap_pi(sk
)->retry_count
++;
1223 __mod_monitor_timer();
1225 control
= L2CAP_CTRL_POLL
;
1226 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1230 static void l2cap_retrans_timeout(unsigned long arg
)
1232 struct sock
*sk
= (void *) arg
;
1236 l2cap_pi(sk
)->retry_count
= 1;
1237 __mod_monitor_timer();
1239 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1241 control
= L2CAP_CTRL_POLL
;
1242 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
1246 static void l2cap_drop_acked_frames(struct sock
*sk
)
1248 struct sk_buff
*skb
;
1250 while ((skb
= skb_peek(TX_QUEUE(sk
)))) {
1251 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1254 skb
= skb_dequeue(TX_QUEUE(sk
));
1257 l2cap_pi(sk
)->unacked_frames
--;
1260 if (!l2cap_pi(sk
)->unacked_frames
)
1261 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1266 static inline int l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1268 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1271 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1273 err
= hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1280 static int l2cap_streaming_send(struct sock
*sk
)
1282 struct sk_buff
*skb
, *tx_skb
;
1283 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1287 while ((skb
= sk
->sk_send_head
)) {
1288 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1290 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1291 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1292 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1294 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1295 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1296 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1299 err
= l2cap_do_send(sk
, tx_skb
);
1301 l2cap_send_disconn_req(pi
->conn
, sk
);
1305 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1307 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1308 sk
->sk_send_head
= NULL
;
1310 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1312 skb
= skb_dequeue(TX_QUEUE(sk
));
1318 static int l2cap_retransmit_frame(struct sock
*sk
, u8 tx_seq
)
1320 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1321 struct sk_buff
*skb
, *tx_skb
;
1325 skb
= skb_peek(TX_QUEUE(sk
));
1327 if (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1328 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1330 skb
= skb_queue_next(TX_QUEUE(sk
), skb
);
1334 if (pi
->remote_max_tx
&&
1335 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1336 l2cap_send_disconn_req(pi
->conn
, sk
);
1340 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1341 bt_cb(skb
)->retries
++;
1342 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1343 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1344 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1345 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1347 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1348 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1349 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1352 err
= l2cap_do_send(sk
, tx_skb
);
1354 l2cap_send_disconn_req(pi
->conn
, sk
);
1362 static int l2cap_ertm_send(struct sock
*sk
)
1364 struct sk_buff
*skb
, *tx_skb
;
1365 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1369 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
1372 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
)) &&
1373 !(pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)) {
1375 if (pi
->remote_max_tx
&&
1376 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1377 l2cap_send_disconn_req(pi
->conn
, sk
);
1381 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1383 bt_cb(skb
)->retries
++;
1385 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1386 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1387 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1388 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1391 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1392 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1393 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1396 err
= l2cap_do_send(sk
, tx_skb
);
1398 l2cap_send_disconn_req(pi
->conn
, sk
);
1401 __mod_retrans_timer();
1403 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1404 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1406 pi
->unacked_frames
++;
1408 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1409 sk
->sk_send_head
= NULL
;
1411 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1417 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1419 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1420 struct sk_buff
**frag
;
1423 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
)) {
1430 /* Continuation fragments (no L2CAP header) */
1431 frag
= &skb_shinfo(skb
)->frag_list
;
1433 count
= min_t(unsigned int, conn
->mtu
, len
);
1435 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1438 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1444 frag
= &(*frag
)->next
;
1450 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1452 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1453 struct sk_buff
*skb
;
1454 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1455 struct l2cap_hdr
*lh
;
1457 BT_DBG("sk %p len %d", sk
, (int)len
);
1459 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1460 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1461 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1463 return ERR_PTR(-ENOMEM
);
1465 /* Create L2CAP header */
1466 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1467 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1468 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1469 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1471 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1472 if (unlikely(err
< 0)) {
1474 return ERR_PTR(err
);
1479 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1481 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1482 struct sk_buff
*skb
;
1483 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1484 struct l2cap_hdr
*lh
;
1486 BT_DBG("sk %p len %d", sk
, (int)len
);
1488 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1489 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1490 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1492 return ERR_PTR(-ENOMEM
);
1494 /* Create L2CAP header */
1495 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1496 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1497 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1499 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1500 if (unlikely(err
< 0)) {
1502 return ERR_PTR(err
);
1507 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1509 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1510 struct sk_buff
*skb
;
1511 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1512 struct l2cap_hdr
*lh
;
1514 BT_DBG("sk %p len %d", sk
, (int)len
);
1519 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1522 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1523 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1524 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1526 return ERR_PTR(-ENOMEM
);
1528 /* Create L2CAP header */
1529 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1530 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1531 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1532 put_unaligned_le16(control
, skb_put(skb
, 2));
1534 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1536 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1537 if (unlikely(err
< 0)) {
1539 return ERR_PTR(err
);
1542 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1543 put_unaligned_le16(0, skb_put(skb
, 2));
1545 bt_cb(skb
)->retries
= 0;
1549 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1551 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1552 struct sk_buff
*skb
;
1553 struct sk_buff_head sar_queue
;
1557 __skb_queue_head_init(&sar_queue
);
1558 control
= L2CAP_SDU_START
;
1559 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->max_pdu_size
, control
, len
);
1561 return PTR_ERR(skb
);
1563 __skb_queue_tail(&sar_queue
, skb
);
1564 len
-= pi
->max_pdu_size
;
1565 size
+=pi
->max_pdu_size
;
1571 if (len
> pi
->max_pdu_size
) {
1572 control
|= L2CAP_SDU_CONTINUE
;
1573 buflen
= pi
->max_pdu_size
;
1575 control
|= L2CAP_SDU_END
;
1579 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1581 skb_queue_purge(&sar_queue
);
1582 return PTR_ERR(skb
);
1585 __skb_queue_tail(&sar_queue
, skb
);
1590 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1591 if (sk
->sk_send_head
== NULL
)
1592 sk
->sk_send_head
= sar_queue
.next
;
1597 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1599 struct sock
*sk
= sock
->sk
;
1600 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1601 struct sk_buff
*skb
;
1605 BT_DBG("sock %p, sk %p", sock
, sk
);
1607 err
= sock_error(sk
);
1611 if (msg
->msg_flags
& MSG_OOB
)
1614 /* Check outgoing MTU */
1615 if (sk
->sk_type
== SOCK_SEQPACKET
&& pi
->mode
== L2CAP_MODE_BASIC
&&
1621 if (sk
->sk_state
!= BT_CONNECTED
) {
1626 /* Connectionless channel */
1627 if (sk
->sk_type
== SOCK_DGRAM
) {
1628 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1629 err
= l2cap_do_send(sk
, skb
);
1634 case L2CAP_MODE_BASIC
:
1635 /* Create a basic PDU */
1636 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1642 err
= l2cap_do_send(sk
, skb
);
1647 case L2CAP_MODE_ERTM
:
1648 case L2CAP_MODE_STREAMING
:
1649 /* Entire SDU fits into one PDU */
1650 if (len
<= pi
->max_pdu_size
) {
1651 control
= L2CAP_SDU_UNSEGMENTED
;
1652 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1657 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1658 if (sk
->sk_send_head
== NULL
)
1659 sk
->sk_send_head
= skb
;
1661 /* Segment SDU into multiples PDUs */
1662 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1667 if (pi
->mode
== L2CAP_MODE_STREAMING
)
1668 err
= l2cap_streaming_send(sk
);
1670 err
= l2cap_ertm_send(sk
);
1677 BT_DBG("bad state %1.1x", pi
->mode
);
1686 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1688 struct sock
*sk
= sock
->sk
;
1692 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1693 struct l2cap_conn_rsp rsp
;
1695 sk
->sk_state
= BT_CONFIG
;
1697 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1698 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1699 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1700 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1701 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1702 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1710 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1713 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1715 struct sock
*sk
= sock
->sk
;
1716 struct l2cap_options opts
;
1720 BT_DBG("sk %p", sk
);
1726 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1727 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1728 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1729 opts
.mode
= l2cap_pi(sk
)->mode
;
1730 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1732 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1733 if (copy_from_user((char *) &opts
, optval
, len
)) {
1738 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1739 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1740 l2cap_pi(sk
)->mode
= opts
.mode
;
1741 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1745 if (get_user(opt
, (u32 __user
*) optval
)) {
1750 if (opt
& L2CAP_LM_AUTH
)
1751 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1752 if (opt
& L2CAP_LM_ENCRYPT
)
1753 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1754 if (opt
& L2CAP_LM_SECURE
)
1755 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1757 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1758 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1770 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1772 struct sock
*sk
= sock
->sk
;
1773 struct bt_security sec
;
1777 BT_DBG("sk %p", sk
);
1779 if (level
== SOL_L2CAP
)
1780 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1782 if (level
!= SOL_BLUETOOTH
)
1783 return -ENOPROTOOPT
;
1789 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1794 sec
.level
= BT_SECURITY_LOW
;
1796 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1797 if (copy_from_user((char *) &sec
, optval
, len
)) {
1802 if (sec
.level
< BT_SECURITY_LOW
||
1803 sec
.level
> BT_SECURITY_HIGH
) {
1808 l2cap_pi(sk
)->sec_level
= sec
.level
;
1811 case BT_DEFER_SETUP
:
1812 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1817 if (get_user(opt
, (u32 __user
*) optval
)) {
1822 bt_sk(sk
)->defer_setup
= opt
;
1834 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
1836 struct sock
*sk
= sock
->sk
;
1837 struct l2cap_options opts
;
1838 struct l2cap_conninfo cinfo
;
1842 BT_DBG("sk %p", sk
);
1844 if (get_user(len
, optlen
))
1851 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1852 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1853 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1854 opts
.mode
= l2cap_pi(sk
)->mode
;
1855 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1857 len
= min_t(unsigned int, len
, sizeof(opts
));
1858 if (copy_to_user(optval
, (char *) &opts
, len
))
1864 switch (l2cap_pi(sk
)->sec_level
) {
1865 case BT_SECURITY_LOW
:
1866 opt
= L2CAP_LM_AUTH
;
1868 case BT_SECURITY_MEDIUM
:
1869 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
1871 case BT_SECURITY_HIGH
:
1872 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
1880 if (l2cap_pi(sk
)->role_switch
)
1881 opt
|= L2CAP_LM_MASTER
;
1883 if (l2cap_pi(sk
)->force_reliable
)
1884 opt
|= L2CAP_LM_RELIABLE
;
1886 if (put_user(opt
, (u32 __user
*) optval
))
1890 case L2CAP_CONNINFO
:
1891 if (sk
->sk_state
!= BT_CONNECTED
&&
1892 !(sk
->sk_state
== BT_CONNECT2
&&
1893 bt_sk(sk
)->defer_setup
)) {
1898 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
1899 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
1901 len
= min_t(unsigned int, len
, sizeof(cinfo
));
1902 if (copy_to_user(optval
, (char *) &cinfo
, len
))
1916 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1918 struct sock
*sk
= sock
->sk
;
1919 struct bt_security sec
;
1922 BT_DBG("sk %p", sk
);
1924 if (level
== SOL_L2CAP
)
1925 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
1927 if (level
!= SOL_BLUETOOTH
)
1928 return -ENOPROTOOPT
;
1930 if (get_user(len
, optlen
))
1937 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1942 sec
.level
= l2cap_pi(sk
)->sec_level
;
1944 len
= min_t(unsigned int, len
, sizeof(sec
));
1945 if (copy_to_user(optval
, (char *) &sec
, len
))
1950 case BT_DEFER_SETUP
:
1951 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1956 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
1970 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
1972 struct sock
*sk
= sock
->sk
;
1975 BT_DBG("sock %p, sk %p", sock
, sk
);
1981 if (!sk
->sk_shutdown
) {
1982 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1983 l2cap_sock_clear_timer(sk
);
1984 __l2cap_sock_close(sk
, 0);
1986 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
1987 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
1994 static int l2cap_sock_release(struct socket
*sock
)
1996 struct sock
*sk
= sock
->sk
;
1999 BT_DBG("sock %p, sk %p", sock
, sk
);
2004 err
= l2cap_sock_shutdown(sock
, 2);
2007 l2cap_sock_kill(sk
);
2011 static void l2cap_chan_ready(struct sock
*sk
)
2013 struct sock
*parent
= bt_sk(sk
)->parent
;
2015 BT_DBG("sk %p, parent %p", sk
, parent
);
2017 l2cap_pi(sk
)->conf_state
= 0;
2018 l2cap_sock_clear_timer(sk
);
2021 /* Outgoing channel.
2022 * Wake up socket sleeping on connect.
2024 sk
->sk_state
= BT_CONNECTED
;
2025 sk
->sk_state_change(sk
);
2027 /* Incoming channel.
2028 * Wake up socket sleeping on accept.
2030 parent
->sk_data_ready(parent
, 0);
2034 /* Copy frame to all raw sockets on that connection */
2035 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2037 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2038 struct sk_buff
*nskb
;
2041 BT_DBG("conn %p", conn
);
2043 read_lock(&l
->lock
);
2044 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2045 if (sk
->sk_type
!= SOCK_RAW
)
2048 /* Don't send frame to the socket it came from */
2051 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2055 if (sock_queue_rcv_skb(sk
, nskb
))
2058 read_unlock(&l
->lock
);
2061 /* ---- L2CAP signalling commands ---- */
2062 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2063 u8 code
, u8 ident
, u16 dlen
, void *data
)
2065 struct sk_buff
*skb
, **frag
;
2066 struct l2cap_cmd_hdr
*cmd
;
2067 struct l2cap_hdr
*lh
;
2070 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2071 conn
, code
, ident
, dlen
);
2073 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2074 count
= min_t(unsigned int, conn
->mtu
, len
);
2076 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2080 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2081 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2082 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2084 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2087 cmd
->len
= cpu_to_le16(dlen
);
2090 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2091 memcpy(skb_put(skb
, count
), data
, count
);
2097 /* Continuation fragments (no L2CAP header) */
2098 frag
= &skb_shinfo(skb
)->frag_list
;
2100 count
= min_t(unsigned int, conn
->mtu
, len
);
2102 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2106 memcpy(skb_put(*frag
, count
), data
, count
);
2111 frag
= &(*frag
)->next
;
2121 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2123 struct l2cap_conf_opt
*opt
= *ptr
;
2126 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2134 *val
= *((u8
*) opt
->val
);
2138 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2142 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2146 *val
= (unsigned long) opt
->val
;
2150 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2154 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2156 struct l2cap_conf_opt
*opt
= *ptr
;
2158 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2165 *((u8
*) opt
->val
) = val
;
2169 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2173 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2177 memcpy(opt
->val
, (void *) val
, len
);
2181 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2184 static inline void l2cap_ertm_init(struct sock
*sk
)
2186 l2cap_pi(sk
)->expected_ack_seq
= 0;
2187 l2cap_pi(sk
)->unacked_frames
= 0;
2188 l2cap_pi(sk
)->buffer_seq
= 0;
2189 l2cap_pi(sk
)->num_to_ack
= 0;
2191 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2192 l2cap_retrans_timeout
, (unsigned long) sk
);
2193 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2194 l2cap_monitor_timeout
, (unsigned long) sk
);
2196 __skb_queue_head_init(SREJ_QUEUE(sk
));
2199 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2201 u32 local_feat_mask
= l2cap_feat_mask
;
2203 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2206 case L2CAP_MODE_ERTM
:
2207 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2208 case L2CAP_MODE_STREAMING
:
2209 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2215 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2218 case L2CAP_MODE_STREAMING
:
2219 case L2CAP_MODE_ERTM
:
2220 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2224 return L2CAP_MODE_BASIC
;
2228 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2230 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2231 struct l2cap_conf_req
*req
= data
;
2232 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2233 void *ptr
= req
->data
;
2235 BT_DBG("sk %p", sk
);
2237 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2241 case L2CAP_MODE_STREAMING
:
2242 case L2CAP_MODE_ERTM
:
2243 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2244 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2245 l2cap_send_disconn_req(pi
->conn
, sk
);
2248 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2254 case L2CAP_MODE_BASIC
:
2255 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2256 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2259 case L2CAP_MODE_ERTM
:
2260 rfc
.mode
= L2CAP_MODE_ERTM
;
2261 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2262 rfc
.max_transmit
= max_transmit
;
2263 rfc
.retrans_timeout
= 0;
2264 rfc
.monitor_timeout
= 0;
2265 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2267 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2268 sizeof(rfc
), (unsigned long) &rfc
);
2270 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2273 if (pi
->fcs
== L2CAP_FCS_NONE
||
2274 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2275 pi
->fcs
= L2CAP_FCS_NONE
;
2276 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2280 case L2CAP_MODE_STREAMING
:
2281 rfc
.mode
= L2CAP_MODE_STREAMING
;
2283 rfc
.max_transmit
= 0;
2284 rfc
.retrans_timeout
= 0;
2285 rfc
.monitor_timeout
= 0;
2286 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2288 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2289 sizeof(rfc
), (unsigned long) &rfc
);
2291 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2294 if (pi
->fcs
== L2CAP_FCS_NONE
||
2295 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2296 pi
->fcs
= L2CAP_FCS_NONE
;
2297 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2302 /* FIXME: Need actual value of the flush timeout */
2303 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2304 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2306 req
->dcid
= cpu_to_le16(pi
->dcid
);
2307 req
->flags
= cpu_to_le16(0);
2312 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2314 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2315 struct l2cap_conf_rsp
*rsp
= data
;
2316 void *ptr
= rsp
->data
;
2317 void *req
= pi
->conf_req
;
2318 int len
= pi
->conf_len
;
2319 int type
, hint
, olen
;
2321 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2322 u16 mtu
= L2CAP_DEFAULT_MTU
;
2323 u16 result
= L2CAP_CONF_SUCCESS
;
2325 BT_DBG("sk %p", sk
);
2327 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2328 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2330 hint
= type
& L2CAP_CONF_HINT
;
2331 type
&= L2CAP_CONF_MASK
;
2334 case L2CAP_CONF_MTU
:
2338 case L2CAP_CONF_FLUSH_TO
:
2342 case L2CAP_CONF_QOS
:
2345 case L2CAP_CONF_RFC
:
2346 if (olen
== sizeof(rfc
))
2347 memcpy(&rfc
, (void *) val
, olen
);
2350 case L2CAP_CONF_FCS
:
2351 if (val
== L2CAP_FCS_NONE
)
2352 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2360 result
= L2CAP_CONF_UNKNOWN
;
2361 *((u8
*) ptr
++) = type
;
2366 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2370 case L2CAP_MODE_STREAMING
:
2371 case L2CAP_MODE_ERTM
:
2372 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2373 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2374 return -ECONNREFUSED
;
2377 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2382 if (pi
->mode
!= rfc
.mode
) {
2383 result
= L2CAP_CONF_UNACCEPT
;
2384 rfc
.mode
= pi
->mode
;
2386 if (pi
->num_conf_rsp
== 1)
2387 return -ECONNREFUSED
;
2389 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2390 sizeof(rfc
), (unsigned long) &rfc
);
2394 if (result
== L2CAP_CONF_SUCCESS
) {
2395 /* Configure output options and let the other side know
2396 * which ones we don't like. */
2398 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2399 result
= L2CAP_CONF_UNACCEPT
;
2402 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2404 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2407 case L2CAP_MODE_BASIC
:
2408 pi
->fcs
= L2CAP_FCS_NONE
;
2409 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2412 case L2CAP_MODE_ERTM
:
2413 pi
->remote_tx_win
= rfc
.txwin_size
;
2414 pi
->remote_max_tx
= rfc
.max_transmit
;
2415 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2417 rfc
.retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
2418 rfc
.monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
2420 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2422 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2423 sizeof(rfc
), (unsigned long) &rfc
);
2427 case L2CAP_MODE_STREAMING
:
2428 pi
->remote_tx_win
= rfc
.txwin_size
;
2429 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2431 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2433 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2434 sizeof(rfc
), (unsigned long) &rfc
);
2439 result
= L2CAP_CONF_UNACCEPT
;
2441 memset(&rfc
, 0, sizeof(rfc
));
2442 rfc
.mode
= pi
->mode
;
2445 if (result
== L2CAP_CONF_SUCCESS
)
2446 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2448 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2449 rsp
->result
= cpu_to_le16(result
);
2450 rsp
->flags
= cpu_to_le16(0x0000);
2455 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2457 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2458 struct l2cap_conf_req
*req
= data
;
2459 void *ptr
= req
->data
;
2462 struct l2cap_conf_rfc rfc
;
2464 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2466 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2467 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2470 case L2CAP_CONF_MTU
:
2471 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2472 *result
= L2CAP_CONF_UNACCEPT
;
2473 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2476 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2479 case L2CAP_CONF_FLUSH_TO
:
2481 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2485 case L2CAP_CONF_RFC
:
2486 if (olen
== sizeof(rfc
))
2487 memcpy(&rfc
, (void *)val
, olen
);
2489 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2490 rfc
.mode
!= pi
->mode
)
2491 return -ECONNREFUSED
;
2493 pi
->mode
= rfc
.mode
;
2496 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2497 sizeof(rfc
), (unsigned long) &rfc
);
2502 if (*result
== L2CAP_CONF_SUCCESS
) {
2504 case L2CAP_MODE_ERTM
:
2505 pi
->remote_tx_win
= rfc
.txwin_size
;
2506 pi
->retrans_timeout
= rfc
.retrans_timeout
;
2507 pi
->monitor_timeout
= rfc
.monitor_timeout
;
2508 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2510 case L2CAP_MODE_STREAMING
:
2511 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2516 req
->dcid
= cpu_to_le16(pi
->dcid
);
2517 req
->flags
= cpu_to_le16(0x0000);
2522 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2524 struct l2cap_conf_rsp
*rsp
= data
;
2525 void *ptr
= rsp
->data
;
2527 BT_DBG("sk %p", sk
);
2529 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2530 rsp
->result
= cpu_to_le16(result
);
2531 rsp
->flags
= cpu_to_le16(flags
);
2536 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2538 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2540 if (rej
->reason
!= 0x0000)
2543 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2544 cmd
->ident
== conn
->info_ident
) {
2545 del_timer(&conn
->info_timer
);
2547 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2548 conn
->info_ident
= 0;
2550 l2cap_conn_start(conn
);
2556 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2558 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2559 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2560 struct l2cap_conn_rsp rsp
;
2561 struct sock
*sk
, *parent
;
2562 int result
, status
= L2CAP_CS_NO_INFO
;
2564 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2565 __le16 psm
= req
->psm
;
2567 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2569 /* Check if we have socket listening on psm */
2570 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2572 result
= L2CAP_CR_BAD_PSM
;
2576 /* Check if the ACL is secure enough (if not SDP) */
2577 if (psm
!= cpu_to_le16(0x0001) &&
2578 !hci_conn_check_link_mode(conn
->hcon
)) {
2579 conn
->disc_reason
= 0x05;
2580 result
= L2CAP_CR_SEC_BLOCK
;
2584 result
= L2CAP_CR_NO_MEM
;
2586 /* Check for backlog size */
2587 if (sk_acceptq_is_full(parent
)) {
2588 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2592 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2596 write_lock_bh(&list
->lock
);
2598 /* Check if we already have channel with that dcid */
2599 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2600 write_unlock_bh(&list
->lock
);
2601 sock_set_flag(sk
, SOCK_ZAPPED
);
2602 l2cap_sock_kill(sk
);
2606 hci_conn_hold(conn
->hcon
);
2608 l2cap_sock_init(sk
, parent
);
2609 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2610 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2611 l2cap_pi(sk
)->psm
= psm
;
2612 l2cap_pi(sk
)->dcid
= scid
;
2614 __l2cap_chan_add(conn
, sk
, parent
);
2615 dcid
= l2cap_pi(sk
)->scid
;
2617 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2619 l2cap_pi(sk
)->ident
= cmd
->ident
;
2621 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2622 if (l2cap_check_security(sk
)) {
2623 if (bt_sk(sk
)->defer_setup
) {
2624 sk
->sk_state
= BT_CONNECT2
;
2625 result
= L2CAP_CR_PEND
;
2626 status
= L2CAP_CS_AUTHOR_PEND
;
2627 parent
->sk_data_ready(parent
, 0);
2629 sk
->sk_state
= BT_CONFIG
;
2630 result
= L2CAP_CR_SUCCESS
;
2631 status
= L2CAP_CS_NO_INFO
;
2634 sk
->sk_state
= BT_CONNECT2
;
2635 result
= L2CAP_CR_PEND
;
2636 status
= L2CAP_CS_AUTHEN_PEND
;
2639 sk
->sk_state
= BT_CONNECT2
;
2640 result
= L2CAP_CR_PEND
;
2641 status
= L2CAP_CS_NO_INFO
;
2644 write_unlock_bh(&list
->lock
);
2647 bh_unlock_sock(parent
);
2650 rsp
.scid
= cpu_to_le16(scid
);
2651 rsp
.dcid
= cpu_to_le16(dcid
);
2652 rsp
.result
= cpu_to_le16(result
);
2653 rsp
.status
= cpu_to_le16(status
);
2654 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2656 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2657 struct l2cap_info_req info
;
2658 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2660 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2661 conn
->info_ident
= l2cap_get_ident(conn
);
2663 mod_timer(&conn
->info_timer
, jiffies
+
2664 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2666 l2cap_send_cmd(conn
, conn
->info_ident
,
2667 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2673 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2675 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2676 u16 scid
, dcid
, result
, status
;
2680 scid
= __le16_to_cpu(rsp
->scid
);
2681 dcid
= __le16_to_cpu(rsp
->dcid
);
2682 result
= __le16_to_cpu(rsp
->result
);
2683 status
= __le16_to_cpu(rsp
->status
);
2685 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2688 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2692 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2698 case L2CAP_CR_SUCCESS
:
2699 sk
->sk_state
= BT_CONFIG
;
2700 l2cap_pi(sk
)->ident
= 0;
2701 l2cap_pi(sk
)->dcid
= dcid
;
2702 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2704 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2706 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2707 l2cap_build_conf_req(sk
, req
), req
);
2708 l2cap_pi(sk
)->num_conf_req
++;
2712 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2716 l2cap_chan_del(sk
, ECONNREFUSED
);
2724 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2726 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2732 dcid
= __le16_to_cpu(req
->dcid
);
2733 flags
= __le16_to_cpu(req
->flags
);
2735 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2737 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2741 if (sk
->sk_state
== BT_DISCONN
)
2744 /* Reject if config buffer is too small. */
2745 len
= cmd_len
- sizeof(*req
);
2746 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2747 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2748 l2cap_build_conf_rsp(sk
, rsp
,
2749 L2CAP_CONF_REJECT
, flags
), rsp
);
2754 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2755 l2cap_pi(sk
)->conf_len
+= len
;
2757 if (flags
& 0x0001) {
2758 /* Incomplete config. Send empty response. */
2759 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2760 l2cap_build_conf_rsp(sk
, rsp
,
2761 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2765 /* Complete config. */
2766 len
= l2cap_parse_conf_req(sk
, rsp
);
2768 l2cap_send_disconn_req(conn
, sk
);
2772 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2773 l2cap_pi(sk
)->num_conf_rsp
++;
2775 /* Reset config buffer. */
2776 l2cap_pi(sk
)->conf_len
= 0;
2778 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2781 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2782 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
2783 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2784 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2786 sk
->sk_state
= BT_CONNECTED
;
2788 l2cap_pi(sk
)->next_tx_seq
= 0;
2789 l2cap_pi(sk
)->expected_tx_seq
= 0;
2790 __skb_queue_head_init(TX_QUEUE(sk
));
2791 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2792 l2cap_ertm_init(sk
);
2794 l2cap_chan_ready(sk
);
2798 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2800 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2801 l2cap_build_conf_req(sk
, buf
), buf
);
2802 l2cap_pi(sk
)->num_conf_req
++;
2810 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2812 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2813 u16 scid
, flags
, result
;
2816 scid
= __le16_to_cpu(rsp
->scid
);
2817 flags
= __le16_to_cpu(rsp
->flags
);
2818 result
= __le16_to_cpu(rsp
->result
);
2820 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2821 scid
, flags
, result
);
2823 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2828 case L2CAP_CONF_SUCCESS
:
2831 case L2CAP_CONF_UNACCEPT
:
2832 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2833 int len
= cmd
->len
- sizeof(*rsp
);
2836 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2837 l2cap_send_disconn_req(conn
, sk
);
2841 /* throw out any old stored conf requests */
2842 result
= L2CAP_CONF_SUCCESS
;
2843 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2846 l2cap_send_disconn_req(conn
, sk
);
2850 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2851 L2CAP_CONF_REQ
, len
, req
);
2852 l2cap_pi(sk
)->num_conf_req
++;
2853 if (result
!= L2CAP_CONF_SUCCESS
)
2859 sk
->sk_state
= BT_DISCONN
;
2860 sk
->sk_err
= ECONNRESET
;
2861 l2cap_sock_set_timer(sk
, HZ
* 5);
2862 l2cap_send_disconn_req(conn
, sk
);
2869 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2871 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2872 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
2873 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
2874 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
2876 sk
->sk_state
= BT_CONNECTED
;
2877 l2cap_pi(sk
)->next_tx_seq
= 0;
2878 l2cap_pi(sk
)->expected_tx_seq
= 0;
2879 __skb_queue_head_init(TX_QUEUE(sk
));
2880 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2881 l2cap_ertm_init(sk
);
2883 l2cap_chan_ready(sk
);
2891 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2893 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2894 struct l2cap_disconn_rsp rsp
;
2898 scid
= __le16_to_cpu(req
->scid
);
2899 dcid
= __le16_to_cpu(req
->dcid
);
2901 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2903 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2907 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
2908 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2909 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2911 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2913 skb_queue_purge(TX_QUEUE(sk
));
2915 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
2916 skb_queue_purge(SREJ_QUEUE(sk
));
2917 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2918 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2921 l2cap_chan_del(sk
, ECONNRESET
);
2924 l2cap_sock_kill(sk
);
2928 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2930 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2934 scid
= __le16_to_cpu(rsp
->scid
);
2935 dcid
= __le16_to_cpu(rsp
->dcid
);
2937 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2939 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2943 skb_queue_purge(TX_QUEUE(sk
));
2945 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
2946 skb_queue_purge(SREJ_QUEUE(sk
));
2947 del_timer(&l2cap_pi(sk
)->retrans_timer
);
2948 del_timer(&l2cap_pi(sk
)->monitor_timer
);
2951 l2cap_chan_del(sk
, 0);
2954 l2cap_sock_kill(sk
);
2958 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2960 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2963 type
= __le16_to_cpu(req
->type
);
2965 BT_DBG("type 0x%4.4x", type
);
2967 if (type
== L2CAP_IT_FEAT_MASK
) {
2969 u32 feat_mask
= l2cap_feat_mask
;
2970 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2971 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2972 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2974 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2976 put_unaligned_le32(feat_mask
, rsp
->data
);
2977 l2cap_send_cmd(conn
, cmd
->ident
,
2978 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2979 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2981 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2982 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2983 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2984 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2985 l2cap_send_cmd(conn
, cmd
->ident
,
2986 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2988 struct l2cap_info_rsp rsp
;
2989 rsp
.type
= cpu_to_le16(type
);
2990 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2991 l2cap_send_cmd(conn
, cmd
->ident
,
2992 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2998 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3000 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3003 type
= __le16_to_cpu(rsp
->type
);
3004 result
= __le16_to_cpu(rsp
->result
);
3006 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3008 del_timer(&conn
->info_timer
);
3010 if (type
== L2CAP_IT_FEAT_MASK
) {
3011 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3013 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3014 struct l2cap_info_req req
;
3015 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3017 conn
->info_ident
= l2cap_get_ident(conn
);
3019 l2cap_send_cmd(conn
, conn
->info_ident
,
3020 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3022 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3023 conn
->info_ident
= 0;
3025 l2cap_conn_start(conn
);
3027 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3028 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3029 conn
->info_ident
= 0;
3031 l2cap_conn_start(conn
);
3037 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3039 u8
*data
= skb
->data
;
3041 struct l2cap_cmd_hdr cmd
;
3044 l2cap_raw_recv(conn
, skb
);
3046 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3048 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3049 data
+= L2CAP_CMD_HDR_SIZE
;
3050 len
-= L2CAP_CMD_HDR_SIZE
;
3052 cmd_len
= le16_to_cpu(cmd
.len
);
3054 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3056 if (cmd_len
> len
|| !cmd
.ident
) {
3057 BT_DBG("corrupted command");
3062 case L2CAP_COMMAND_REJ
:
3063 l2cap_command_rej(conn
, &cmd
, data
);
3066 case L2CAP_CONN_REQ
:
3067 err
= l2cap_connect_req(conn
, &cmd
, data
);
3070 case L2CAP_CONN_RSP
:
3071 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3074 case L2CAP_CONF_REQ
:
3075 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3078 case L2CAP_CONF_RSP
:
3079 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3082 case L2CAP_DISCONN_REQ
:
3083 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3086 case L2CAP_DISCONN_RSP
:
3087 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3090 case L2CAP_ECHO_REQ
:
3091 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3094 case L2CAP_ECHO_RSP
:
3097 case L2CAP_INFO_REQ
:
3098 err
= l2cap_information_req(conn
, &cmd
, data
);
3101 case L2CAP_INFO_RSP
:
3102 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3106 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3112 struct l2cap_cmd_rej rej
;
3113 BT_DBG("error %d", err
);
3115 /* FIXME: Map err to a valid reason */
3116 rej
.reason
= cpu_to_le16(0);
3117 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3127 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3129 u16 our_fcs
, rcv_fcs
;
3130 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3132 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3133 skb_trim(skb
, skb
->len
- 2);
3134 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3135 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3137 if (our_fcs
!= rcv_fcs
)
3143 static void l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3145 struct sk_buff
*next_skb
;
3147 bt_cb(skb
)->tx_seq
= tx_seq
;
3148 bt_cb(skb
)->sar
= sar
;
3150 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3152 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3157 if (bt_cb(next_skb
)->tx_seq
> tx_seq
) {
3158 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3162 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3165 } while((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3167 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3170 static int l2cap_sar_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3172 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3173 struct sk_buff
*_skb
;
3176 switch (control
& L2CAP_CTRL_SAR
) {
3177 case L2CAP_SDU_UNSEGMENTED
:
3178 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3183 err
= sock_queue_rcv_skb(sk
, skb
);
3189 case L2CAP_SDU_START
:
3190 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3195 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3198 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3204 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3206 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3207 pi
->partial_sdu_len
= skb
->len
;
3211 case L2CAP_SDU_CONTINUE
:
3212 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3215 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3217 pi
->partial_sdu_len
+= skb
->len
;
3218 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3226 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3229 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3231 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3232 pi
->partial_sdu_len
+= skb
->len
;
3234 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3235 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3236 err
= sock_queue_rcv_skb(sk
, _skb
);
3250 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3252 struct sk_buff
*skb
;
3255 while((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3256 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3259 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3260 control
|= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3261 l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3262 l2cap_pi(sk
)->buffer_seq_srej
=
3263 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3268 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3270 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3271 struct srej_list
*l
, *tmp
;
3274 list_for_each_entry_safe(l
,tmp
, SREJ_LIST(sk
), list
) {
3275 if (l
->tx_seq
== tx_seq
) {
3280 control
= L2CAP_SUPER_SELECT_REJECT
;
3281 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3282 l2cap_send_sframe(pi
, control
);
3284 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3288 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3290 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3291 struct srej_list
*new;
3294 while (tx_seq
!= pi
->expected_tx_seq
) {
3295 control
= L2CAP_SUPER_SELECT_REJECT
;
3296 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3297 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
3298 control
|= L2CAP_CTRL_POLL
;
3299 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
3301 l2cap_send_sframe(pi
, control
);
3303 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3304 new->tx_seq
= pi
->expected_tx_seq
++;
3305 list_add_tail(&new->list
, SREJ_LIST(sk
));
3307 pi
->expected_tx_seq
++;
3310 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3312 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3313 u8 tx_seq
= __get_txseq(rx_control
);
3314 u8 req_seq
= __get_reqseq(rx_control
);
3316 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3319 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3321 pi
->expected_ack_seq
= req_seq
;
3322 l2cap_drop_acked_frames(sk
);
3324 if (tx_seq
== pi
->expected_tx_seq
)
3327 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3328 struct srej_list
*first
;
3330 first
= list_first_entry(SREJ_LIST(sk
),
3331 struct srej_list
, list
);
3332 if (tx_seq
== first
->tx_seq
) {
3333 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3334 l2cap_check_srej_gap(sk
, tx_seq
);
3336 list_del(&first
->list
);
3339 if (list_empty(SREJ_LIST(sk
))) {
3340 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3341 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3344 struct srej_list
*l
;
3345 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3347 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3348 if (l
->tx_seq
== tx_seq
) {
3349 l2cap_resend_srejframe(sk
, tx_seq
);
3353 l2cap_send_srejframe(sk
, tx_seq
);
3356 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3358 INIT_LIST_HEAD(SREJ_LIST(sk
));
3359 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3361 __skb_queue_head_init(SREJ_QUEUE(sk
));
3362 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3364 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3366 l2cap_send_srejframe(sk
, tx_seq
);
3371 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3373 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3374 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3378 if (rx_control
& L2CAP_CTRL_FINAL
) {
3379 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3380 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3382 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3383 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3384 l2cap_ertm_send(sk
);
3388 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3390 err
= l2cap_sar_reassembly_sdu(sk
, skb
, rx_control
);
3394 pi
->num_to_ack
= (pi
->num_to_ack
+ 1) % L2CAP_DEFAULT_NUM_TO_ACK
;
3395 if (pi
->num_to_ack
== L2CAP_DEFAULT_NUM_TO_ACK
- 1) {
3396 tx_control
|= L2CAP_SUPER_RCV_READY
;
3397 tx_control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3398 l2cap_send_sframe(pi
, tx_control
);
3403 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3405 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3406 u8 tx_seq
= __get_reqseq(rx_control
);
3408 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3410 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3411 case L2CAP_SUPER_RCV_READY
:
3412 if (rx_control
& L2CAP_CTRL_POLL
) {
3413 u16 control
= L2CAP_CTRL_FINAL
;
3414 control
|= L2CAP_SUPER_RCV_READY
|
3415 (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
);
3416 l2cap_send_sframe(l2cap_pi(sk
), control
);
3417 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3419 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3420 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3421 pi
->expected_ack_seq
= tx_seq
;
3422 l2cap_drop_acked_frames(sk
);
3424 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3425 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3427 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3428 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3429 l2cap_ertm_send(sk
);
3432 if (!(pi
->conn_state
& L2CAP_CONN_WAIT_F
))
3435 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3436 del_timer(&pi
->monitor_timer
);
3438 if (pi
->unacked_frames
> 0)
3439 __mod_retrans_timer();
3441 pi
->expected_ack_seq
= tx_seq
;
3442 l2cap_drop_acked_frames(sk
);
3444 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3445 (pi
->unacked_frames
> 0))
3446 __mod_retrans_timer();
3448 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3449 l2cap_ertm_send(sk
);
3453 case L2CAP_SUPER_REJECT
:
3454 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3456 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3457 l2cap_drop_acked_frames(sk
);
3459 if (rx_control
& L2CAP_CTRL_FINAL
) {
3460 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3461 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3463 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3464 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3465 l2cap_ertm_send(sk
);
3468 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
3469 pi
->next_tx_seq
= pi
->expected_ack_seq
;
3470 l2cap_ertm_send(sk
);
3472 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3473 pi
->srej_save_reqseq
= tx_seq
;
3474 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3480 case L2CAP_SUPER_SELECT_REJECT
:
3481 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3483 if (rx_control
& L2CAP_CTRL_POLL
) {
3484 pi
->expected_ack_seq
= tx_seq
;
3485 l2cap_drop_acked_frames(sk
);
3486 l2cap_retransmit_frame(sk
, tx_seq
);
3487 l2cap_ertm_send(sk
);
3488 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3489 pi
->srej_save_reqseq
= tx_seq
;
3490 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3492 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3493 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3494 pi
->srej_save_reqseq
== tx_seq
)
3495 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3497 l2cap_retransmit_frame(sk
, tx_seq
);
3500 l2cap_retransmit_frame(sk
, tx_seq
);
3501 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3502 pi
->srej_save_reqseq
= tx_seq
;
3503 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3508 case L2CAP_SUPER_RCV_NOT_READY
:
3509 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3510 pi
->expected_ack_seq
= tx_seq
;
3511 l2cap_drop_acked_frames(sk
);
3513 del_timer(&l2cap_pi(sk
)->retrans_timer
);
3514 if (rx_control
& L2CAP_CTRL_POLL
) {
3515 u16 control
= L2CAP_CTRL_FINAL
;
3516 l2cap_send_rr_or_rnr(l2cap_pi(sk
), control
);
3524 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3527 struct l2cap_pinfo
*pi
;
3531 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
3533 BT_DBG("unknown cid 0x%4.4x", cid
);
3539 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3541 if (sk
->sk_state
!= BT_CONNECTED
)
3545 case L2CAP_MODE_BASIC
:
3546 /* If socket recv buffers overflows we drop data here
3547 * which is *bad* because L2CAP has to be reliable.
3548 * But we don't have any other choice. L2CAP doesn't
3549 * provide flow control mechanism. */
3551 if (pi
->imtu
< skb
->len
)
3554 if (!sock_queue_rcv_skb(sk
, skb
))
3558 case L2CAP_MODE_ERTM
:
3559 control
= get_unaligned_le16(skb
->data
);
3563 if (__is_sar_start(control
))
3566 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3570 * We can just drop the corrupted I-frame here.
3571 * Receiver will miss it and start proper recovery
3572 * procedures and ask retransmission.
3574 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
)
3577 if (l2cap_check_fcs(pi
, skb
))
3580 if (__is_iframe(control
))
3581 l2cap_data_channel_iframe(sk
, control
, skb
);
3583 l2cap_data_channel_sframe(sk
, control
, skb
);
3587 case L2CAP_MODE_STREAMING
:
3588 control
= get_unaligned_le16(skb
->data
);
3592 if (__is_sar_start(control
))
3595 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3598 if (len
> L2CAP_DEFAULT_MAX_PDU_SIZE
|| __is_sframe(control
))
3601 if (l2cap_check_fcs(pi
, skb
))
3604 tx_seq
= __get_txseq(control
);
3606 if (pi
->expected_tx_seq
== tx_seq
)
3607 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3609 pi
->expected_tx_seq
= tx_seq
+ 1;
3611 l2cap_sar_reassembly_sdu(sk
, skb
, control
);
3616 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, l2cap_pi(sk
)->mode
);
3630 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3634 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3638 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3640 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3643 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3646 if (!sock_queue_rcv_skb(sk
, skb
))
3658 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3660 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3664 skb_pull(skb
, L2CAP_HDR_SIZE
);
3665 cid
= __le16_to_cpu(lh
->cid
);
3666 len
= __le16_to_cpu(lh
->len
);
3668 if (len
!= skb
->len
) {
3673 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3676 case L2CAP_CID_SIGNALING
:
3677 l2cap_sig_channel(conn
, skb
);
3680 case L2CAP_CID_CONN_LESS
:
3681 psm
= get_unaligned_le16(skb
->data
);
3683 l2cap_conless_channel(conn
, psm
, skb
);
3687 l2cap_data_channel(conn
, cid
, skb
);
3692 /* ---- L2CAP interface with lower layer (HCI) ---- */
3694 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3696 int exact
= 0, lm1
= 0, lm2
= 0;
3697 register struct sock
*sk
;
3698 struct hlist_node
*node
;
3700 if (type
!= ACL_LINK
)
3703 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3705 /* Find listening sockets and check their link_mode */
3706 read_lock(&l2cap_sk_list
.lock
);
3707 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3708 if (sk
->sk_state
!= BT_LISTEN
)
3711 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3712 lm1
|= HCI_LM_ACCEPT
;
3713 if (l2cap_pi(sk
)->role_switch
)
3714 lm1
|= HCI_LM_MASTER
;
3716 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3717 lm2
|= HCI_LM_ACCEPT
;
3718 if (l2cap_pi(sk
)->role_switch
)
3719 lm2
|= HCI_LM_MASTER
;
3722 read_unlock(&l2cap_sk_list
.lock
);
3724 return exact
? lm1
: lm2
;
3727 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3729 struct l2cap_conn
*conn
;
3731 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3733 if (hcon
->type
!= ACL_LINK
)
3737 conn
= l2cap_conn_add(hcon
, status
);
3739 l2cap_conn_ready(conn
);
3741 l2cap_conn_del(hcon
, bt_err(status
));
3746 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3748 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3750 BT_DBG("hcon %p", hcon
);
3752 if (hcon
->type
!= ACL_LINK
|| !conn
)
3755 return conn
->disc_reason
;
3758 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3760 BT_DBG("hcon %p reason %d", hcon
, reason
);
3762 if (hcon
->type
!= ACL_LINK
)
3765 l2cap_conn_del(hcon
, bt_err(reason
));
3770 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
3772 if (sk
->sk_type
!= SOCK_SEQPACKET
)
3775 if (encrypt
== 0x00) {
3776 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
3777 l2cap_sock_clear_timer(sk
);
3778 l2cap_sock_set_timer(sk
, HZ
* 5);
3779 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
3780 __l2cap_sock_close(sk
, ECONNREFUSED
);
3782 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
3783 l2cap_sock_clear_timer(sk
);
3787 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3789 struct l2cap_chan_list
*l
;
3790 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3796 l
= &conn
->chan_list
;
3798 BT_DBG("conn %p", conn
);
3800 read_lock(&l
->lock
);
3802 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
3805 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3810 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3811 sk
->sk_state
== BT_CONFIG
)) {
3812 l2cap_check_encryption(sk
, encrypt
);
3817 if (sk
->sk_state
== BT_CONNECT
) {
3819 struct l2cap_conn_req req
;
3820 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3821 req
.psm
= l2cap_pi(sk
)->psm
;
3823 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
3825 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3826 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3828 l2cap_sock_clear_timer(sk
);
3829 l2cap_sock_set_timer(sk
, HZ
/ 10);
3831 } else if (sk
->sk_state
== BT_CONNECT2
) {
3832 struct l2cap_conn_rsp rsp
;
3836 sk
->sk_state
= BT_CONFIG
;
3837 result
= L2CAP_CR_SUCCESS
;
3839 sk
->sk_state
= BT_DISCONN
;
3840 l2cap_sock_set_timer(sk
, HZ
/ 10);
3841 result
= L2CAP_CR_SEC_BLOCK
;
3844 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3845 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3846 rsp
.result
= cpu_to_le16(result
);
3847 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3848 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3849 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3855 read_unlock(&l
->lock
);
3860 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3862 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3864 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
3867 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3869 if (flags
& ACL_START
) {
3870 struct l2cap_hdr
*hdr
;
3874 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3875 kfree_skb(conn
->rx_skb
);
3876 conn
->rx_skb
= NULL
;
3878 l2cap_conn_unreliable(conn
, ECOMM
);
3882 BT_ERR("Frame is too short (len %d)", skb
->len
);
3883 l2cap_conn_unreliable(conn
, ECOMM
);
3887 hdr
= (struct l2cap_hdr
*) skb
->data
;
3888 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
3890 if (len
== skb
->len
) {
3891 /* Complete frame received */
3892 l2cap_recv_frame(conn
, skb
);
3896 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
3898 if (skb
->len
> len
) {
3899 BT_ERR("Frame is too long (len %d, expected len %d)",
3901 l2cap_conn_unreliable(conn
, ECOMM
);
3905 /* Allocate skb for the complete frame (with header) */
3906 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3910 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3912 conn
->rx_len
= len
- skb
->len
;
3914 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
3916 if (!conn
->rx_len
) {
3917 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
3918 l2cap_conn_unreliable(conn
, ECOMM
);
3922 if (skb
->len
> conn
->rx_len
) {
3923 BT_ERR("Fragment is too long (len %d, expected %d)",
3924 skb
->len
, conn
->rx_len
);
3925 kfree_skb(conn
->rx_skb
);
3926 conn
->rx_skb
= NULL
;
3928 l2cap_conn_unreliable(conn
, ECOMM
);
3932 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
3934 conn
->rx_len
-= skb
->len
;
3936 if (!conn
->rx_len
) {
3937 /* Complete frame received */
3938 l2cap_recv_frame(conn
, conn
->rx_skb
);
3939 conn
->rx_skb
= NULL
;
3948 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
3951 struct hlist_node
*node
;
3953 read_lock_bh(&l2cap_sk_list
.lock
);
3955 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3956 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3958 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3959 batostr(&bt_sk(sk
)->src
),
3960 batostr(&bt_sk(sk
)->dst
),
3961 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
3963 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
3966 read_unlock_bh(&l2cap_sk_list
.lock
);
3971 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
3973 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
3976 static const struct file_operations l2cap_debugfs_fops
= {
3977 .open
= l2cap_debugfs_open
,
3979 .llseek
= seq_lseek
,
3980 .release
= single_release
,
3983 static struct dentry
*l2cap_debugfs
;
3985 static const struct proto_ops l2cap_sock_ops
= {
3986 .family
= PF_BLUETOOTH
,
3987 .owner
= THIS_MODULE
,
3988 .release
= l2cap_sock_release
,
3989 .bind
= l2cap_sock_bind
,
3990 .connect
= l2cap_sock_connect
,
3991 .listen
= l2cap_sock_listen
,
3992 .accept
= l2cap_sock_accept
,
3993 .getname
= l2cap_sock_getname
,
3994 .sendmsg
= l2cap_sock_sendmsg
,
3995 .recvmsg
= l2cap_sock_recvmsg
,
3996 .poll
= bt_sock_poll
,
3997 .ioctl
= bt_sock_ioctl
,
3998 .mmap
= sock_no_mmap
,
3999 .socketpair
= sock_no_socketpair
,
4000 .shutdown
= l2cap_sock_shutdown
,
4001 .setsockopt
= l2cap_sock_setsockopt
,
4002 .getsockopt
= l2cap_sock_getsockopt
4005 static const struct net_proto_family l2cap_sock_family_ops
= {
4006 .family
= PF_BLUETOOTH
,
4007 .owner
= THIS_MODULE
,
4008 .create
= l2cap_sock_create
,
4011 static struct hci_proto l2cap_hci_proto
= {
4013 .id
= HCI_PROTO_L2CAP
,
4014 .connect_ind
= l2cap_connect_ind
,
4015 .connect_cfm
= l2cap_connect_cfm
,
4016 .disconn_ind
= l2cap_disconn_ind
,
4017 .disconn_cfm
= l2cap_disconn_cfm
,
4018 .security_cfm
= l2cap_security_cfm
,
4019 .recv_acldata
= l2cap_recv_acldata
4022 static int __init
l2cap_init(void)
4026 err
= proto_register(&l2cap_proto
, 0);
4030 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4032 BT_ERR("L2CAP socket registration failed");
4036 err
= hci_register_proto(&l2cap_hci_proto
);
4038 BT_ERR("L2CAP protocol registration failed");
4039 bt_sock_unregister(BTPROTO_L2CAP
);
4044 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4045 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4047 BT_ERR("Failed to create L2CAP debug file");
4050 BT_INFO("L2CAP ver %s", VERSION
);
4051 BT_INFO("L2CAP socket layer initialized");
4056 proto_unregister(&l2cap_proto
);
4060 static void __exit
l2cap_exit(void)
4062 debugfs_remove(l2cap_debugfs
);
4064 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4065 BT_ERR("L2CAP socket unregistration failed");
4067 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4068 BT_ERR("L2CAP protocol unregistration failed");
4070 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4082 module_init(l2cap_init
);
4083 module_exit(l2cap_exit
);
4085 module_param(enable_ertm
, bool, 0644);
4086 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4088 module_param(max_transmit
, uint
, 0644);
4089 MODULE_PARM_DESC(max_transmit
, "Max transmit value (default = 3)");
4091 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4092 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4093 MODULE_VERSION(VERSION
);
4094 MODULE_LICENSE("GPL");
4095 MODULE_ALIAS("bt-proto-0");