2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
60 static int disable_ertm
;
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops
;
67 static struct workqueue_struct
*_busy_wq
;
69 static struct bt_sock_list l2cap_sk_list
= {
70 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
73 static void l2cap_busy_work(struct work_struct
*work
);
75 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
76 static void l2cap_sock_close(struct sock
*sk
);
77 static void l2cap_sock_kill(struct sock
*sk
);
79 static int l2cap_build_conf_req(struct sock
*sk
, void *data
);
80 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
81 u8 code
, u8 ident
, u16 dlen
, void *data
);
83 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
85 /* ---- L2CAP timers ---- */
86 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
88 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
89 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
92 static void l2cap_sock_clear_timer(struct sock
*sk
)
94 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
95 sk_stop_timer(sk
, &sk
->sk_timer
);
/* Socket timer expiry: close the channel with an error that reflects
 * the connection phase it was stuck in. Runs in timer (BH) context. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
131 /* ---- L2CAP channels ---- */
132 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
135 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
136 if (l2cap_pi(s
)->dcid
== cid
)
142 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
145 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
146 if (l2cap_pi(s
)->scid
== cid
)
152 /* Find channel with given SCID.
153 * Returns locked socket */
154 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
158 s
= __l2cap_get_chan_by_scid(l
, cid
);
161 read_unlock(&l
->lock
);
165 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
168 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
169 if (l2cap_pi(s
)->ident
== ident
)
175 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
179 s
= __l2cap_get_chan_by_ident(l
, ident
);
182 read_unlock(&l
->lock
);
186 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
188 u16 cid
= L2CAP_CID_DYN_START
;
190 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
191 if (!__l2cap_get_chan_by_scid(l
, cid
))
198 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
203 l2cap_pi(l
->head
)->prev_c
= sk
;
205 l2cap_pi(sk
)->next_c
= l
->head
;
206 l2cap_pi(sk
)->prev_c
= NULL
;
210 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
212 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
214 write_lock_bh(&l
->lock
);
219 l2cap_pi(next
)->prev_c
= prev
;
221 l2cap_pi(prev
)->next_c
= next
;
222 write_unlock_bh(&l
->lock
);
227 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
229 struct l2cap_chan_list
*l
= &conn
->chan_list
;
231 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
232 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
234 conn
->disc_reason
= 0x13;
236 l2cap_pi(sk
)->conn
= conn
;
238 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
239 /* Alloc CID for connection-oriented socket */
240 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
241 } else if (sk
->sk_type
== SOCK_DGRAM
) {
242 /* Connectionless socket */
243 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
244 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
245 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
247 /* Raw socket can send/recv signalling messages only */
248 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
249 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
250 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
253 __l2cap_chan_link(l
, sk
);
256 bt_accept_enqueue(parent
, sk
);
260 * Must be called on the locked socket. */
261 static void l2cap_chan_del(struct sock
*sk
, int err
)
263 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
264 struct sock
*parent
= bt_sk(sk
)->parent
;
266 l2cap_sock_clear_timer(sk
);
268 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
271 /* Unlink from channel list */
272 l2cap_chan_unlink(&conn
->chan_list
, sk
);
273 l2cap_pi(sk
)->conn
= NULL
;
274 hci_conn_put(conn
->hcon
);
277 sk
->sk_state
= BT_CLOSED
;
278 sock_set_flag(sk
, SOCK_ZAPPED
);
284 bt_accept_unlink(sk
);
285 parent
->sk_data_ready(parent
, 0);
287 sk
->sk_state_change(sk
);
289 skb_queue_purge(TX_QUEUE(sk
));
291 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
292 struct srej_list
*l
, *tmp
;
294 del_timer(&l2cap_pi(sk
)->retrans_timer
);
295 del_timer(&l2cap_pi(sk
)->monitor_timer
);
296 del_timer(&l2cap_pi(sk
)->ack_timer
);
298 skb_queue_purge(SREJ_QUEUE(sk
));
299 skb_queue_purge(BUSY_QUEUE(sk
));
301 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
308 static inline u8
l2cap_get_auth_type(struct sock
*sk
)
310 if (sk
->sk_type
== SOCK_RAW
) {
311 switch (l2cap_pi(sk
)->sec_level
) {
312 case BT_SECURITY_HIGH
:
313 return HCI_AT_DEDICATED_BONDING_MITM
;
314 case BT_SECURITY_MEDIUM
:
315 return HCI_AT_DEDICATED_BONDING
;
317 return HCI_AT_NO_BONDING
;
319 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
320 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
321 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
323 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
324 return HCI_AT_NO_BONDING_MITM
;
326 return HCI_AT_NO_BONDING
;
328 switch (l2cap_pi(sk
)->sec_level
) {
329 case BT_SECURITY_HIGH
:
330 return HCI_AT_GENERAL_BONDING_MITM
;
331 case BT_SECURITY_MEDIUM
:
332 return HCI_AT_GENERAL_BONDING
;
334 return HCI_AT_NO_BONDING
;
339 /* Service level security */
340 static inline int l2cap_check_security(struct sock
*sk
)
342 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
345 auth_type
= l2cap_get_auth_type(sk
);
347 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
351 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
355 /* Get next available identificator.
356 * 1 - 128 are used by kernel.
357 * 129 - 199 are reserved.
358 * 200 - 254 are used by utilities like l2ping, etc.
361 spin_lock_bh(&conn
->lock
);
363 if (++conn
->tx_ident
> 128)
368 spin_unlock_bh(&conn
->lock
);
373 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
375 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
377 BT_DBG("code 0x%2.2x", code
);
382 hci_send_acl(conn
->hcon
, skb
, 0);
385 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
388 struct l2cap_hdr
*lh
;
389 struct l2cap_conn
*conn
= pi
->conn
;
390 struct sock
*sk
= (struct sock
*)pi
;
391 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
393 if (sk
->sk_state
!= BT_CONNECTED
)
396 if (pi
->fcs
== L2CAP_FCS_CRC16
)
399 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
401 count
= min_t(unsigned int, conn
->mtu
, hlen
);
402 control
|= L2CAP_CTRL_FRAME_TYPE
;
404 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
405 control
|= L2CAP_CTRL_FINAL
;
406 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
409 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
410 control
|= L2CAP_CTRL_POLL
;
411 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
414 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
418 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
419 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
420 lh
->cid
= cpu_to_le16(pi
->dcid
);
421 put_unaligned_le16(control
, skb_put(skb
, 2));
423 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
424 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
425 put_unaligned_le16(fcs
, skb_put(skb
, 2));
428 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
431 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
433 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
434 control
|= L2CAP_SUPER_RCV_NOT_READY
;
435 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
437 control
|= L2CAP_SUPER_RCV_READY
;
439 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
441 l2cap_send_sframe(pi
, control
);
444 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
446 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
449 static void l2cap_do_start(struct sock
*sk
)
451 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
453 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
454 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
457 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
458 struct l2cap_conn_req req
;
459 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
460 req
.psm
= l2cap_pi(sk
)->psm
;
462 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
463 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
465 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
466 L2CAP_CONN_REQ
, sizeof(req
), &req
);
469 struct l2cap_info_req req
;
470 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
472 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
473 conn
->info_ident
= l2cap_get_ident(conn
);
475 mod_timer(&conn
->info_timer
, jiffies
+
476 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
478 l2cap_send_cmd(conn
, conn
->info_ident
,
479 L2CAP_INFO_REQ
, sizeof(req
), &req
);
483 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
485 u32 local_feat_mask
= l2cap_feat_mask
;
487 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
490 case L2CAP_MODE_ERTM
:
491 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
492 case L2CAP_MODE_STREAMING
:
493 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
499 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
501 struct l2cap_disconn_req req
;
506 skb_queue_purge(TX_QUEUE(sk
));
508 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
509 del_timer(&l2cap_pi(sk
)->retrans_timer
);
510 del_timer(&l2cap_pi(sk
)->monitor_timer
);
511 del_timer(&l2cap_pi(sk
)->ack_timer
);
514 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
515 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
516 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
517 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
519 sk
->sk_state
= BT_DISCONN
;
523 /* ---- L2CAP connections ---- */
524 static void l2cap_conn_start(struct l2cap_conn
*conn
)
526 struct l2cap_chan_list
*l
= &conn
->chan_list
;
527 struct sock_del_list del
, *tmp1
, *tmp2
;
530 BT_DBG("conn %p", conn
);
532 INIT_LIST_HEAD(&del
.list
);
536 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
539 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
540 sk
->sk_type
!= SOCK_STREAM
) {
545 if (sk
->sk_state
== BT_CONNECT
) {
546 struct l2cap_conn_req req
;
548 if (!l2cap_check_security(sk
) ||
549 !__l2cap_no_conn_pending(sk
)) {
554 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
556 && l2cap_pi(sk
)->conf_state
&
557 L2CAP_CONF_STATE2_DEVICE
) {
558 tmp1
= kzalloc(sizeof(struct sock_del_list
),
561 list_add_tail(&tmp1
->list
, &del
.list
);
566 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
567 req
.psm
= l2cap_pi(sk
)->psm
;
569 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
570 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
572 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
573 L2CAP_CONN_REQ
, sizeof(req
), &req
);
575 } else if (sk
->sk_state
== BT_CONNECT2
) {
576 struct l2cap_conn_rsp rsp
;
578 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
579 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
581 if (l2cap_check_security(sk
)) {
582 if (bt_sk(sk
)->defer_setup
) {
583 struct sock
*parent
= bt_sk(sk
)->parent
;
584 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
585 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
586 parent
->sk_data_ready(parent
, 0);
589 sk
->sk_state
= BT_CONFIG
;
590 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
591 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
594 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
595 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
598 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
599 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
601 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
||
602 rsp
.result
!= L2CAP_CR_SUCCESS
) {
607 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
608 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
609 l2cap_build_conf_req(sk
, buf
), buf
);
610 l2cap_pi(sk
)->num_conf_req
++;
616 read_unlock(&l
->lock
);
618 list_for_each_entry_safe(tmp1
, tmp2
, &del
.list
, list
) {
619 bh_lock_sock(tmp1
->sk
);
620 __l2cap_sock_close(tmp1
->sk
, ECONNRESET
);
621 bh_unlock_sock(tmp1
->sk
);
622 list_del(&tmp1
->list
);
627 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
629 struct l2cap_chan_list
*l
= &conn
->chan_list
;
632 BT_DBG("conn %p", conn
);
636 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
639 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
640 sk
->sk_type
!= SOCK_STREAM
) {
641 l2cap_sock_clear_timer(sk
);
642 sk
->sk_state
= BT_CONNECTED
;
643 sk
->sk_state_change(sk
);
644 } else if (sk
->sk_state
== BT_CONNECT
)
650 read_unlock(&l
->lock
);
653 /* Notify sockets that we cannot guaranty reliability anymore */
654 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
656 struct l2cap_chan_list
*l
= &conn
->chan_list
;
659 BT_DBG("conn %p", conn
);
663 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
664 if (l2cap_pi(sk
)->force_reliable
)
668 read_unlock(&l
->lock
);
671 static void l2cap_info_timeout(unsigned long arg
)
673 struct l2cap_conn
*conn
= (void *) arg
;
675 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
676 conn
->info_ident
= 0;
678 l2cap_conn_start(conn
);
681 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
683 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
688 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
692 hcon
->l2cap_data
= conn
;
695 BT_DBG("hcon %p conn %p", hcon
, conn
);
697 conn
->mtu
= hcon
->hdev
->acl_mtu
;
698 conn
->src
= &hcon
->hdev
->bdaddr
;
699 conn
->dst
= &hcon
->dst
;
703 spin_lock_init(&conn
->lock
);
704 rwlock_init(&conn
->chan_list
.lock
);
706 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
707 (unsigned long) conn
);
709 conn
->disc_reason
= 0x13;
714 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
716 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
722 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
724 kfree_skb(conn
->rx_skb
);
727 while ((sk
= conn
->chan_list
.head
)) {
729 l2cap_chan_del(sk
, err
);
734 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
735 del_timer_sync(&conn
->info_timer
);
737 hcon
->l2cap_data
= NULL
;
741 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
743 struct l2cap_chan_list
*l
= &conn
->chan_list
;
744 write_lock_bh(&l
->lock
);
745 __l2cap_chan_add(conn
, sk
, parent
);
746 write_unlock_bh(&l
->lock
);
749 /* ---- Socket interface ---- */
750 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
753 struct hlist_node
*node
;
754 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
755 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
762 /* Find socket with psm and source bdaddr.
763 * Returns closest match.
765 static struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
767 struct sock
*sk
= NULL
, *sk1
= NULL
;
768 struct hlist_node
*node
;
770 read_lock(&l2cap_sk_list
.lock
);
772 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
773 if (state
&& sk
->sk_state
!= state
)
776 if (l2cap_pi(sk
)->psm
== psm
) {
778 if (!bacmp(&bt_sk(sk
)->src
, src
))
782 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
787 read_unlock(&l2cap_sk_list
.lock
);
789 return node
? sk
: sk1
;
792 static void l2cap_sock_destruct(struct sock
*sk
)
796 skb_queue_purge(&sk
->sk_receive_queue
);
797 skb_queue_purge(&sk
->sk_write_queue
);
800 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
804 BT_DBG("parent %p", parent
);
806 /* Close not yet accepted channels */
807 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
808 l2cap_sock_close(sk
);
810 parent
->sk_state
= BT_CLOSED
;
811 sock_set_flag(parent
, SOCK_ZAPPED
);
814 /* Kill socket (only if zapped and orphan)
815 * Must be called on unlocked socket.
817 static void l2cap_sock_kill(struct sock
*sk
)
819 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
822 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
824 /* Kill poor orphan */
825 bt_sock_unlink(&l2cap_sk_list
, sk
);
826 sock_set_flag(sk
, SOCK_DEAD
);
830 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
832 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
834 switch (sk
->sk_state
) {
836 l2cap_sock_cleanup_listen(sk
);
841 if (sk
->sk_type
== SOCK_SEQPACKET
||
842 sk
->sk_type
== SOCK_STREAM
) {
843 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
845 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
846 l2cap_send_disconn_req(conn
, sk
, reason
);
848 l2cap_chan_del(sk
, reason
);
852 if (sk
->sk_type
== SOCK_SEQPACKET
||
853 sk
->sk_type
== SOCK_STREAM
) {
854 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
855 struct l2cap_conn_rsp rsp
;
858 if (bt_sk(sk
)->defer_setup
)
859 result
= L2CAP_CR_SEC_BLOCK
;
861 result
= L2CAP_CR_BAD_PSM
;
862 sk
->sk_state
= BT_DISCONN
;
864 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
865 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
866 rsp
.result
= cpu_to_le16(result
);
867 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
868 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
869 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
871 l2cap_chan_del(sk
, reason
);
876 l2cap_chan_del(sk
, reason
);
880 sock_set_flag(sk
, SOCK_ZAPPED
);
885 /* Must be called on unlocked socket. */
886 static void l2cap_sock_close(struct sock
*sk
)
888 l2cap_sock_clear_timer(sk
);
890 __l2cap_sock_close(sk
, ECONNRESET
);
895 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
897 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
902 sk
->sk_type
= parent
->sk_type
;
903 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
905 pi
->imtu
= l2cap_pi(parent
)->imtu
;
906 pi
->omtu
= l2cap_pi(parent
)->omtu
;
907 pi
->conf_state
= l2cap_pi(parent
)->conf_state
;
908 pi
->mode
= l2cap_pi(parent
)->mode
;
909 pi
->fcs
= l2cap_pi(parent
)->fcs
;
910 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
911 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
912 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
913 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
914 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
916 pi
->imtu
= L2CAP_DEFAULT_MTU
;
918 if (!disable_ertm
&& sk
->sk_type
== SOCK_STREAM
) {
919 pi
->mode
= L2CAP_MODE_ERTM
;
920 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
922 pi
->mode
= L2CAP_MODE_BASIC
;
924 pi
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
925 pi
->fcs
= L2CAP_FCS_CRC16
;
926 pi
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
927 pi
->sec_level
= BT_SECURITY_LOW
;
929 pi
->force_reliable
= 0;
932 /* Default config options */
934 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
935 skb_queue_head_init(TX_QUEUE(sk
));
936 skb_queue_head_init(SREJ_QUEUE(sk
));
937 skb_queue_head_init(BUSY_QUEUE(sk
));
938 INIT_LIST_HEAD(SREJ_LIST(sk
));
941 static struct proto l2cap_proto
= {
943 .owner
= THIS_MODULE
,
944 .obj_size
= sizeof(struct l2cap_pinfo
)
947 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
951 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
955 sock_init_data(sock
, sk
);
956 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
958 sk
->sk_destruct
= l2cap_sock_destruct
;
959 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
961 sock_reset_flag(sk
, SOCK_ZAPPED
);
963 sk
->sk_protocol
= proto
;
964 sk
->sk_state
= BT_OPEN
;
966 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
968 bt_sock_link(&l2cap_sk_list
, sk
);
972 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
977 BT_DBG("sock %p", sock
);
979 sock
->state
= SS_UNCONNECTED
;
981 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
982 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
983 return -ESOCKTNOSUPPORT
;
985 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
988 sock
->ops
= &l2cap_sock_ops
;
990 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
994 l2cap_sock_init(sk
, NULL
);
998 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
1000 struct sock
*sk
= sock
->sk
;
1001 struct sockaddr_l2 la
;
1004 BT_DBG("sk %p", sk
);
1006 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
1009 memset(&la
, 0, sizeof(la
));
1010 len
= min_t(unsigned int, sizeof(la
), alen
);
1011 memcpy(&la
, addr
, len
);
1018 if (sk
->sk_state
!= BT_OPEN
) {
1024 __u16 psm
= __le16_to_cpu(la
.l2_psm
);
1026 /* PSM must be odd and lsb of upper byte must be 0 */
1027 if ((psm
& 0x0101) != 0x0001) {
1032 /* Restrict usage of well-known PSMs */
1033 if (psm
< 0x1001 && !capable(CAP_NET_BIND_SERVICE
)) {
1039 write_lock_bh(&l2cap_sk_list
.lock
);
1041 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
1044 /* Save source address */
1045 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
1046 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1047 l2cap_pi(sk
)->sport
= la
.l2_psm
;
1048 sk
->sk_state
= BT_BOUND
;
1050 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
1051 __le16_to_cpu(la
.l2_psm
) == 0x0003)
1052 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1055 write_unlock_bh(&l2cap_sk_list
.lock
);
1062 static int l2cap_do_connect(struct sock
*sk
)
1064 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1065 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1066 struct l2cap_conn
*conn
;
1067 struct hci_conn
*hcon
;
1068 struct hci_dev
*hdev
;
1072 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1075 hdev
= hci_get_route(dst
, src
);
1077 return -EHOSTUNREACH
;
1079 hci_dev_lock_bh(hdev
);
1083 auth_type
= l2cap_get_auth_type(sk
);
1085 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1086 l2cap_pi(sk
)->sec_level
, auth_type
);
1090 conn
= l2cap_conn_add(hcon
, 0);
1098 /* Update source addr of the socket */
1099 bacpy(src
, conn
->src
);
1101 l2cap_chan_add(conn
, sk
, NULL
);
1103 sk
->sk_state
= BT_CONNECT
;
1104 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1106 if (hcon
->state
== BT_CONNECTED
) {
1107 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1108 sk
->sk_type
!= SOCK_STREAM
) {
1109 l2cap_sock_clear_timer(sk
);
1110 if (l2cap_check_security(sk
))
1111 sk
->sk_state
= BT_CONNECTED
;
1117 hci_dev_unlock_bh(hdev
);
1122 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1124 struct sock
*sk
= sock
->sk
;
1125 struct sockaddr_l2 la
;
1128 BT_DBG("sk %p", sk
);
1130 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1131 addr
->sa_family
!= AF_BLUETOOTH
)
1134 memset(&la
, 0, sizeof(la
));
1135 len
= min_t(unsigned int, sizeof(la
), alen
);
1136 memcpy(&la
, addr
, len
);
1143 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1149 switch (l2cap_pi(sk
)->mode
) {
1150 case L2CAP_MODE_BASIC
:
1152 case L2CAP_MODE_ERTM
:
1153 case L2CAP_MODE_STREAMING
:
1162 switch (sk
->sk_state
) {
1166 /* Already connecting */
1170 /* Already connected */
1184 /* PSM must be odd and lsb of upper byte must be 0 */
1185 if ((__le16_to_cpu(la
.l2_psm
) & 0x0101) != 0x0001 &&
1186 sk
->sk_type
!= SOCK_RAW
) {
1191 /* Set destination address and psm */
1192 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1193 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1195 err
= l2cap_do_connect(sk
);
1200 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1201 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1207 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1209 struct sock
*sk
= sock
->sk
;
1212 BT_DBG("sk %p backlog %d", sk
, backlog
);
1216 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1217 || sk
->sk_state
!= BT_BOUND
) {
1222 switch (l2cap_pi(sk
)->mode
) {
1223 case L2CAP_MODE_BASIC
:
1225 case L2CAP_MODE_ERTM
:
1226 case L2CAP_MODE_STREAMING
:
1235 if (!l2cap_pi(sk
)->psm
) {
1236 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1241 write_lock_bh(&l2cap_sk_list
.lock
);
1243 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1244 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1245 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1246 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1251 write_unlock_bh(&l2cap_sk_list
.lock
);
1257 sk
->sk_max_ack_backlog
= backlog
;
1258 sk
->sk_ack_backlog
= 0;
1259 sk
->sk_state
= BT_LISTEN
;
1266 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1268 DECLARE_WAITQUEUE(wait
, current
);
1269 struct sock
*sk
= sock
->sk
, *nsk
;
1273 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1275 if (sk
->sk_state
!= BT_LISTEN
) {
1280 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1282 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1284 /* Wait for an incoming connection. (wake-one). */
1285 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1286 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1287 set_current_state(TASK_INTERRUPTIBLE
);
1294 timeo
= schedule_timeout(timeo
);
1295 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1297 if (sk
->sk_state
!= BT_LISTEN
) {
1302 if (signal_pending(current
)) {
1303 err
= sock_intr_errno(timeo
);
1307 set_current_state(TASK_RUNNING
);
1308 remove_wait_queue(sk_sleep(sk
), &wait
);
1313 newsock
->state
= SS_CONNECTED
;
1315 BT_DBG("new socket %p", nsk
);
1322 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1324 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1325 struct sock
*sk
= sock
->sk
;
1327 BT_DBG("sock %p, sk %p", sock
, sk
);
1329 addr
->sa_family
= AF_BLUETOOTH
;
1330 *len
= sizeof(struct sockaddr_l2
);
1333 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1334 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1335 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1337 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1338 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1339 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1345 static int __l2cap_wait_ack(struct sock
*sk
)
1347 DECLARE_WAITQUEUE(wait
, current
);
1351 add_wait_queue(sk_sleep(sk
), &wait
);
1352 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1353 set_current_state(TASK_INTERRUPTIBLE
);
1358 if (signal_pending(current
)) {
1359 err
= sock_intr_errno(timeo
);
1364 timeo
= schedule_timeout(timeo
);
1367 err
= sock_error(sk
);
1371 set_current_state(TASK_RUNNING
);
1372 remove_wait_queue(sk_sleep(sk
), &wait
);
1376 static void l2cap_monitor_timeout(unsigned long arg
)
1378 struct sock
*sk
= (void *) arg
;
1380 BT_DBG("sk %p", sk
);
1383 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1384 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
1389 l2cap_pi(sk
)->retry_count
++;
1390 __mod_monitor_timer();
1392 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1396 static void l2cap_retrans_timeout(unsigned long arg
)
1398 struct sock
*sk
= (void *) arg
;
1400 BT_DBG("sk %p", sk
);
1403 l2cap_pi(sk
)->retry_count
= 1;
1404 __mod_monitor_timer();
1406 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1408 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1412 static void l2cap_drop_acked_frames(struct sock
*sk
)
1414 struct sk_buff
*skb
;
1416 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1417 l2cap_pi(sk
)->unacked_frames
) {
1418 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1421 skb
= skb_dequeue(TX_QUEUE(sk
));
1424 l2cap_pi(sk
)->unacked_frames
--;
1427 if (!l2cap_pi(sk
)->unacked_frames
)
1428 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1431 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1433 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1435 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1437 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1440 static void l2cap_streaming_send(struct sock
*sk
)
1442 struct sk_buff
*skb
;
1443 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1446 while ((skb
= skb_dequeue(TX_QUEUE(sk
)))) {
1447 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1448 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1449 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1451 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1452 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1453 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1456 l2cap_do_send(sk
, skb
);
1458 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1462 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1464 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1465 struct sk_buff
*skb
, *tx_skb
;
1468 skb
= skb_peek(TX_QUEUE(sk
));
1473 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1476 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1479 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1481 if (pi
->remote_max_tx
&&
1482 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1483 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1487 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1488 bt_cb(skb
)->retries
++;
1489 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1491 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1492 control
|= L2CAP_CTRL_FINAL
;
1493 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1496 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1497 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1499 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1501 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1502 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1503 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1506 l2cap_do_send(sk
, tx_skb
);
1509 static int l2cap_ertm_send(struct sock
*sk
)
1511 struct sk_buff
*skb
, *tx_skb
;
1512 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1516 if (sk
->sk_state
!= BT_CONNECTED
)
1519 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1521 if (pi
->remote_max_tx
&&
1522 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1523 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1527 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1529 bt_cb(skb
)->retries
++;
1531 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1532 control
&= L2CAP_CTRL_SAR
;
1534 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1535 control
|= L2CAP_CTRL_FINAL
;
1536 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1538 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1539 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1540 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1543 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1544 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1545 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1548 l2cap_do_send(sk
, tx_skb
);
1550 __mod_retrans_timer();
1552 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1553 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1555 pi
->unacked_frames
++;
1558 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1559 sk
->sk_send_head
= NULL
;
1561 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1569 static int l2cap_retransmit_frames(struct sock
*sk
)
1571 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1574 if (!skb_queue_empty(TX_QUEUE(sk
)))
1575 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1577 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1578 ret
= l2cap_ertm_send(sk
);
1582 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1584 struct sock
*sk
= (struct sock
*)pi
;
1587 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1589 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1590 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1591 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1592 l2cap_send_sframe(pi
, control
);
1596 if (l2cap_ertm_send(sk
) > 0)
1599 control
|= L2CAP_SUPER_RCV_READY
;
1600 l2cap_send_sframe(pi
, control
);
1603 static void l2cap_send_srejtail(struct sock
*sk
)
1605 struct srej_list
*tail
;
1608 control
= L2CAP_SUPER_SELECT_REJECT
;
1609 control
|= L2CAP_CTRL_FINAL
;
1611 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1612 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1614 l2cap_send_sframe(l2cap_pi(sk
), control
);
1617 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1619 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1620 struct sk_buff
**frag
;
1623 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1629 /* Continuation fragments (no L2CAP header) */
1630 frag
= &skb_shinfo(skb
)->frag_list
;
1632 count
= min_t(unsigned int, conn
->mtu
, len
);
1634 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1637 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1643 frag
= &(*frag
)->next
;
1649 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1651 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1652 struct sk_buff
*skb
;
1653 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1654 struct l2cap_hdr
*lh
;
1656 BT_DBG("sk %p len %d", sk
, (int)len
);
1658 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1659 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1660 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1662 return ERR_PTR(err
);
1664 /* Create L2CAP header */
1665 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1666 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1667 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1668 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1670 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1671 if (unlikely(err
< 0)) {
1673 return ERR_PTR(err
);
1678 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1680 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1681 struct sk_buff
*skb
;
1682 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1683 struct l2cap_hdr
*lh
;
1685 BT_DBG("sk %p len %d", sk
, (int)len
);
1687 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1688 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1689 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1691 return ERR_PTR(err
);
1693 /* Create L2CAP header */
1694 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1695 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1696 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1698 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1699 if (unlikely(err
< 0)) {
1701 return ERR_PTR(err
);
1706 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1708 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1709 struct sk_buff
*skb
;
1710 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1711 struct l2cap_hdr
*lh
;
1713 BT_DBG("sk %p len %d", sk
, (int)len
);
1716 return ERR_PTR(-ENOTCONN
);
1721 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1724 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1725 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1726 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1728 return ERR_PTR(err
);
1730 /* Create L2CAP header */
1731 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1732 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1733 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1734 put_unaligned_le16(control
, skb_put(skb
, 2));
1736 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1738 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1739 if (unlikely(err
< 0)) {
1741 return ERR_PTR(err
);
1744 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1745 put_unaligned_le16(0, skb_put(skb
, 2));
1747 bt_cb(skb
)->retries
= 0;
1751 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1753 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1754 struct sk_buff
*skb
;
1755 struct sk_buff_head sar_queue
;
1759 skb_queue_head_init(&sar_queue
);
1760 control
= L2CAP_SDU_START
;
1761 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1763 return PTR_ERR(skb
);
1765 __skb_queue_tail(&sar_queue
, skb
);
1766 len
-= pi
->remote_mps
;
1767 size
+= pi
->remote_mps
;
1772 if (len
> pi
->remote_mps
) {
1773 control
= L2CAP_SDU_CONTINUE
;
1774 buflen
= pi
->remote_mps
;
1776 control
= L2CAP_SDU_END
;
1780 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1782 skb_queue_purge(&sar_queue
);
1783 return PTR_ERR(skb
);
1786 __skb_queue_tail(&sar_queue
, skb
);
1790 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1791 if (sk
->sk_send_head
== NULL
)
1792 sk
->sk_send_head
= sar_queue
.next
;
1797 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1799 struct sock
*sk
= sock
->sk
;
1800 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1801 struct sk_buff
*skb
;
1805 BT_DBG("sock %p, sk %p", sock
, sk
);
1807 err
= sock_error(sk
);
1811 if (msg
->msg_flags
& MSG_OOB
)
1816 if (sk
->sk_state
!= BT_CONNECTED
) {
1821 /* Connectionless channel */
1822 if (sk
->sk_type
== SOCK_DGRAM
) {
1823 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1827 l2cap_do_send(sk
, skb
);
1834 case L2CAP_MODE_BASIC
:
1835 /* Check outgoing MTU */
1836 if (len
> pi
->omtu
) {
1841 /* Create a basic PDU */
1842 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1848 l2cap_do_send(sk
, skb
);
1852 case L2CAP_MODE_ERTM
:
1853 case L2CAP_MODE_STREAMING
:
1854 /* Entire SDU fits into one PDU */
1855 if (len
<= pi
->remote_mps
) {
1856 control
= L2CAP_SDU_UNSEGMENTED
;
1857 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1862 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1864 if (sk
->sk_send_head
== NULL
)
1865 sk
->sk_send_head
= skb
;
1868 /* Segment SDU into multiples PDUs */
1869 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1874 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1875 l2cap_streaming_send(sk
);
1877 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
1878 (pi
->conn_state
& L2CAP_CONN_WAIT_F
)) {
1882 err
= l2cap_ertm_send(sk
);
1890 BT_DBG("bad state %1.1x", pi
->mode
);
1899 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1901 struct sock
*sk
= sock
->sk
;
1905 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1906 struct l2cap_conn_rsp rsp
;
1907 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1910 sk
->sk_state
= BT_CONFIG
;
1912 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1913 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1914 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1915 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1916 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1917 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1919 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) {
1924 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
1925 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1926 l2cap_build_conf_req(sk
, buf
), buf
);
1927 l2cap_pi(sk
)->num_conf_req
++;
1935 if (sock
->type
== SOCK_STREAM
)
1936 return bt_sock_stream_recvmsg(iocb
, sock
, msg
, len
, flags
);
1938 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1941 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1943 struct sock
*sk
= sock
->sk
;
1944 struct l2cap_options opts
;
1948 BT_DBG("sk %p", sk
);
1954 if (sk
->sk_state
== BT_CONNECTED
) {
1959 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1960 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1961 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1962 opts
.mode
= l2cap_pi(sk
)->mode
;
1963 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1964 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1965 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1967 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1968 if (copy_from_user((char *) &opts
, optval
, len
)) {
1973 if (opts
.txwin_size
> L2CAP_DEFAULT_TX_WINDOW
) {
1978 l2cap_pi(sk
)->mode
= opts
.mode
;
1979 switch (l2cap_pi(sk
)->mode
) {
1980 case L2CAP_MODE_BASIC
:
1981 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_STATE2_DEVICE
;
1983 case L2CAP_MODE_ERTM
:
1984 case L2CAP_MODE_STREAMING
:
1993 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1994 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1995 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1996 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1997 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
2001 if (get_user(opt
, (u32 __user
*) optval
)) {
2006 if (opt
& L2CAP_LM_AUTH
)
2007 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
2008 if (opt
& L2CAP_LM_ENCRYPT
)
2009 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
2010 if (opt
& L2CAP_LM_SECURE
)
2011 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
2013 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
2014 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
2026 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
2028 struct sock
*sk
= sock
->sk
;
2029 struct bt_security sec
;
2033 BT_DBG("sk %p", sk
);
2035 if (level
== SOL_L2CAP
)
2036 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
2038 if (level
!= SOL_BLUETOOTH
)
2039 return -ENOPROTOOPT
;
2045 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2046 && sk
->sk_type
!= SOCK_RAW
) {
2051 sec
.level
= BT_SECURITY_LOW
;
2053 len
= min_t(unsigned int, sizeof(sec
), optlen
);
2054 if (copy_from_user((char *) &sec
, optval
, len
)) {
2059 if (sec
.level
< BT_SECURITY_LOW
||
2060 sec
.level
> BT_SECURITY_HIGH
) {
2065 l2cap_pi(sk
)->sec_level
= sec
.level
;
2068 case BT_DEFER_SETUP
:
2069 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2074 if (get_user(opt
, (u32 __user
*) optval
)) {
2079 bt_sk(sk
)->defer_setup
= opt
;
2091 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
2093 struct sock
*sk
= sock
->sk
;
2094 struct l2cap_options opts
;
2095 struct l2cap_conninfo cinfo
;
2099 BT_DBG("sk %p", sk
);
2101 if (get_user(len
, optlen
))
2108 opts
.imtu
= l2cap_pi(sk
)->imtu
;
2109 opts
.omtu
= l2cap_pi(sk
)->omtu
;
2110 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
2111 opts
.mode
= l2cap_pi(sk
)->mode
;
2112 opts
.fcs
= l2cap_pi(sk
)->fcs
;
2113 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
2114 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
2116 len
= min_t(unsigned int, len
, sizeof(opts
));
2117 if (copy_to_user(optval
, (char *) &opts
, len
))
2123 switch (l2cap_pi(sk
)->sec_level
) {
2124 case BT_SECURITY_LOW
:
2125 opt
= L2CAP_LM_AUTH
;
2127 case BT_SECURITY_MEDIUM
:
2128 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
2130 case BT_SECURITY_HIGH
:
2131 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
2139 if (l2cap_pi(sk
)->role_switch
)
2140 opt
|= L2CAP_LM_MASTER
;
2142 if (l2cap_pi(sk
)->force_reliable
)
2143 opt
|= L2CAP_LM_RELIABLE
;
2145 if (put_user(opt
, (u32 __user
*) optval
))
2149 case L2CAP_CONNINFO
:
2150 if (sk
->sk_state
!= BT_CONNECTED
&&
2151 !(sk
->sk_state
== BT_CONNECT2
&&
2152 bt_sk(sk
)->defer_setup
)) {
2157 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
2158 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
2160 len
= min_t(unsigned int, len
, sizeof(cinfo
));
2161 if (copy_to_user(optval
, (char *) &cinfo
, len
))
2175 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
2177 struct sock
*sk
= sock
->sk
;
2178 struct bt_security sec
;
2181 BT_DBG("sk %p", sk
);
2183 if (level
== SOL_L2CAP
)
2184 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
2186 if (level
!= SOL_BLUETOOTH
)
2187 return -ENOPROTOOPT
;
2189 if (get_user(len
, optlen
))
2196 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2197 && sk
->sk_type
!= SOCK_RAW
) {
2202 sec
.level
= l2cap_pi(sk
)->sec_level
;
2204 len
= min_t(unsigned int, len
, sizeof(sec
));
2205 if (copy_to_user(optval
, (char *) &sec
, len
))
2210 case BT_DEFER_SETUP
:
2211 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2216 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2230 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2232 struct sock
*sk
= sock
->sk
;
2235 BT_DBG("sock %p, sk %p", sock
, sk
);
2241 if (!sk
->sk_shutdown
) {
2242 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2243 err
= __l2cap_wait_ack(sk
);
2245 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2246 l2cap_sock_clear_timer(sk
);
2247 __l2cap_sock_close(sk
, 0);
2249 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2250 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2254 if (!err
&& sk
->sk_err
)
2261 static int l2cap_sock_release(struct socket
*sock
)
2263 struct sock
*sk
= sock
->sk
;
2266 BT_DBG("sock %p, sk %p", sock
, sk
);
2271 err
= l2cap_sock_shutdown(sock
, 2);
2274 l2cap_sock_kill(sk
);
2278 static void l2cap_chan_ready(struct sock
*sk
)
2280 struct sock
*parent
= bt_sk(sk
)->parent
;
2282 BT_DBG("sk %p, parent %p", sk
, parent
);
2284 l2cap_pi(sk
)->conf_state
= 0;
2285 l2cap_sock_clear_timer(sk
);
2288 /* Outgoing channel.
2289 * Wake up socket sleeping on connect.
2291 sk
->sk_state
= BT_CONNECTED
;
2292 sk
->sk_state_change(sk
);
2294 /* Incoming channel.
2295 * Wake up socket sleeping on accept.
2297 parent
->sk_data_ready(parent
, 0);
2301 /* Copy frame to all raw sockets on that connection */
2302 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2304 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2305 struct sk_buff
*nskb
;
2308 BT_DBG("conn %p", conn
);
2310 read_lock(&l
->lock
);
2311 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2312 if (sk
->sk_type
!= SOCK_RAW
)
2315 /* Don't send frame to the socket it came from */
2318 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2322 if (sock_queue_rcv_skb(sk
, nskb
))
2325 read_unlock(&l
->lock
);
2328 /* ---- L2CAP signalling commands ---- */
2329 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2330 u8 code
, u8 ident
, u16 dlen
, void *data
)
2332 struct sk_buff
*skb
, **frag
;
2333 struct l2cap_cmd_hdr
*cmd
;
2334 struct l2cap_hdr
*lh
;
2337 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2338 conn
, code
, ident
, dlen
);
2340 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2341 count
= min_t(unsigned int, conn
->mtu
, len
);
2343 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2347 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2348 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2349 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2351 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2354 cmd
->len
= cpu_to_le16(dlen
);
2357 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2358 memcpy(skb_put(skb
, count
), data
, count
);
2364 /* Continuation fragments (no L2CAP header) */
2365 frag
= &skb_shinfo(skb
)->frag_list
;
2367 count
= min_t(unsigned int, conn
->mtu
, len
);
2369 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2373 memcpy(skb_put(*frag
, count
), data
, count
);
2378 frag
= &(*frag
)->next
;
2388 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2390 struct l2cap_conf_opt
*opt
= *ptr
;
2393 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2401 *val
= *((u8
*) opt
->val
);
2405 *val
= get_unaligned_le16(opt
->val
);
2409 *val
= get_unaligned_le32(opt
->val
);
2413 *val
= (unsigned long) opt
->val
;
2417 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2421 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2423 struct l2cap_conf_opt
*opt
= *ptr
;
2425 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2432 *((u8
*) opt
->val
) = val
;
2436 put_unaligned_le16(val
, opt
->val
);
2440 put_unaligned_le32(val
, opt
->val
);
2444 memcpy(opt
->val
, (void *) val
, len
);
2448 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
/* Ack timer callback (softirq context): send a pending acknowledgment
 * for the channel whose sock was stashed in the timer argument. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2460 static inline void l2cap_ertm_init(struct sock
*sk
)
2462 l2cap_pi(sk
)->expected_ack_seq
= 0;
2463 l2cap_pi(sk
)->unacked_frames
= 0;
2464 l2cap_pi(sk
)->buffer_seq
= 0;
2465 l2cap_pi(sk
)->num_acked
= 0;
2466 l2cap_pi(sk
)->frames_sent
= 0;
2468 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2469 l2cap_retrans_timeout
, (unsigned long) sk
);
2470 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2471 l2cap_monitor_timeout
, (unsigned long) sk
);
2472 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2473 l2cap_ack_timeout
, (unsigned long) sk
);
2475 __skb_queue_head_init(SREJ_QUEUE(sk
));
2476 __skb_queue_head_init(BUSY_QUEUE(sk
));
2478 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2480 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
2483 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2486 case L2CAP_MODE_STREAMING
:
2487 case L2CAP_MODE_ERTM
:
2488 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2492 return L2CAP_MODE_BASIC
;
2496 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2498 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2499 struct l2cap_conf_req
*req
= data
;
2500 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2501 void *ptr
= req
->data
;
2503 BT_DBG("sk %p", sk
);
2505 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2509 case L2CAP_MODE_STREAMING
:
2510 case L2CAP_MODE_ERTM
:
2511 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
2516 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2522 case L2CAP_MODE_BASIC
:
2523 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2524 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2526 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2527 !(pi
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2530 rfc
.mode
= L2CAP_MODE_BASIC
;
2532 rfc
.max_transmit
= 0;
2533 rfc
.retrans_timeout
= 0;
2534 rfc
.monitor_timeout
= 0;
2535 rfc
.max_pdu_size
= 0;
2537 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2538 (unsigned long) &rfc
);
2541 case L2CAP_MODE_ERTM
:
2542 rfc
.mode
= L2CAP_MODE_ERTM
;
2543 rfc
.txwin_size
= pi
->tx_win
;
2544 rfc
.max_transmit
= pi
->max_tx
;
2545 rfc
.retrans_timeout
= 0;
2546 rfc
.monitor_timeout
= 0;
2547 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2548 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2549 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2551 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2552 (unsigned long) &rfc
);
2554 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2557 if (pi
->fcs
== L2CAP_FCS_NONE
||
2558 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2559 pi
->fcs
= L2CAP_FCS_NONE
;
2560 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2564 case L2CAP_MODE_STREAMING
:
2565 rfc
.mode
= L2CAP_MODE_STREAMING
;
2567 rfc
.max_transmit
= 0;
2568 rfc
.retrans_timeout
= 0;
2569 rfc
.monitor_timeout
= 0;
2570 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2571 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2572 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2574 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2575 (unsigned long) &rfc
);
2577 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2580 if (pi
->fcs
== L2CAP_FCS_NONE
||
2581 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2582 pi
->fcs
= L2CAP_FCS_NONE
;
2583 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2588 /* FIXME: Need actual value of the flush timeout */
2589 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2590 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2592 req
->dcid
= cpu_to_le16(pi
->dcid
);
2593 req
->flags
= cpu_to_le16(0);
2598 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2600 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2601 struct l2cap_conf_rsp
*rsp
= data
;
2602 void *ptr
= rsp
->data
;
2603 void *req
= pi
->conf_req
;
2604 int len
= pi
->conf_len
;
2605 int type
, hint
, olen
;
2607 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2608 u16 mtu
= L2CAP_DEFAULT_MTU
;
2609 u16 result
= L2CAP_CONF_SUCCESS
;
2611 BT_DBG("sk %p", sk
);
2613 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2614 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2616 hint
= type
& L2CAP_CONF_HINT
;
2617 type
&= L2CAP_CONF_MASK
;
2620 case L2CAP_CONF_MTU
:
2624 case L2CAP_CONF_FLUSH_TO
:
2628 case L2CAP_CONF_QOS
:
2631 case L2CAP_CONF_RFC
:
2632 if (olen
== sizeof(rfc
))
2633 memcpy(&rfc
, (void *) val
, olen
);
2636 case L2CAP_CONF_FCS
:
2637 if (val
== L2CAP_FCS_NONE
)
2638 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2646 result
= L2CAP_CONF_UNKNOWN
;
2647 *((u8
*) ptr
++) = type
;
2652 if (pi
->num_conf_rsp
|| pi
->num_conf_req
> 1)
2656 case L2CAP_MODE_STREAMING
:
2657 case L2CAP_MODE_ERTM
:
2658 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2659 pi
->mode
= l2cap_select_mode(rfc
.mode
,
2660 pi
->conn
->feat_mask
);
2664 if (pi
->mode
!= rfc
.mode
)
2665 return -ECONNREFUSED
;
2671 if (pi
->mode
!= rfc
.mode
) {
2672 result
= L2CAP_CONF_UNACCEPT
;
2673 rfc
.mode
= pi
->mode
;
2675 if (pi
->num_conf_rsp
== 1)
2676 return -ECONNREFUSED
;
2678 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2679 sizeof(rfc
), (unsigned long) &rfc
);
2683 if (result
== L2CAP_CONF_SUCCESS
) {
2684 /* Configure output options and let the other side know
2685 * which ones we don't like. */
2687 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2688 result
= L2CAP_CONF_UNACCEPT
;
2691 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2693 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2696 case L2CAP_MODE_BASIC
:
2697 pi
->fcs
= L2CAP_FCS_NONE
;
2698 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2701 case L2CAP_MODE_ERTM
:
2702 pi
->remote_tx_win
= rfc
.txwin_size
;
2703 pi
->remote_max_tx
= rfc
.max_transmit
;
2705 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
2706 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2708 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2710 rfc
.retrans_timeout
=
2711 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2712 rfc
.monitor_timeout
=
2713 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2715 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2717 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2718 sizeof(rfc
), (unsigned long) &rfc
);
2722 case L2CAP_MODE_STREAMING
:
2723 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
2724 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2726 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2728 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2730 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2731 sizeof(rfc
), (unsigned long) &rfc
);
2736 result
= L2CAP_CONF_UNACCEPT
;
2738 memset(&rfc
, 0, sizeof(rfc
));
2739 rfc
.mode
= pi
->mode
;
2742 if (result
== L2CAP_CONF_SUCCESS
)
2743 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2745 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2746 rsp
->result
= cpu_to_le16(result
);
2747 rsp
->flags
= cpu_to_le16(0x0000);
2752 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2754 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2755 struct l2cap_conf_req
*req
= data
;
2756 void *ptr
= req
->data
;
2759 struct l2cap_conf_rfc rfc
;
2761 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2763 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2764 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2767 case L2CAP_CONF_MTU
:
2768 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2769 *result
= L2CAP_CONF_UNACCEPT
;
2770 pi
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2773 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2776 case L2CAP_CONF_FLUSH_TO
:
2778 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2782 case L2CAP_CONF_RFC
:
2783 if (olen
== sizeof(rfc
))
2784 memcpy(&rfc
, (void *)val
, olen
);
2786 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2787 rfc
.mode
!= pi
->mode
)
2788 return -ECONNREFUSED
;
2792 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2793 sizeof(rfc
), (unsigned long) &rfc
);
2798 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
2799 return -ECONNREFUSED
;
2801 pi
->mode
= rfc
.mode
;
2803 if (*result
== L2CAP_CONF_SUCCESS
) {
2805 case L2CAP_MODE_ERTM
:
2806 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2807 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2808 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2810 case L2CAP_MODE_STREAMING
:
2811 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2815 req
->dcid
= cpu_to_le16(pi
->dcid
);
2816 req
->flags
= cpu_to_le16(0x0000);
2821 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2823 struct l2cap_conf_rsp
*rsp
= data
;
2824 void *ptr
= rsp
->data
;
2826 BT_DBG("sk %p", sk
);
2828 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2829 rsp
->result
= cpu_to_le16(result
);
2830 rsp
->flags
= cpu_to_le16(flags
);
2835 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2837 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2840 struct l2cap_conf_rfc rfc
;
2842 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2844 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2847 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2848 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2851 case L2CAP_CONF_RFC
:
2852 if (olen
== sizeof(rfc
))
2853 memcpy(&rfc
, (void *)val
, olen
);
2860 case L2CAP_MODE_ERTM
:
2861 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2862 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2863 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2865 case L2CAP_MODE_STREAMING
:
2866 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2870 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2872 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2874 if (rej
->reason
!= 0x0000)
2877 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2878 cmd
->ident
== conn
->info_ident
) {
2879 del_timer(&conn
->info_timer
);
2881 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2882 conn
->info_ident
= 0;
2884 l2cap_conn_start(conn
);
2890 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2892 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2893 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2894 struct l2cap_conn_rsp rsp
;
2895 struct sock
*parent
, *sk
= NULL
;
2896 int result
, status
= L2CAP_CS_NO_INFO
;
2898 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2899 __le16 psm
= req
->psm
;
2901 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2903 /* Check if we have socket listening on psm */
2904 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2906 result
= L2CAP_CR_BAD_PSM
;
2910 bh_lock_sock(parent
);
2912 /* Check if the ACL is secure enough (if not SDP) */
2913 if (psm
!= cpu_to_le16(0x0001) &&
2914 !hci_conn_check_link_mode(conn
->hcon
)) {
2915 conn
->disc_reason
= 0x05;
2916 result
= L2CAP_CR_SEC_BLOCK
;
2920 result
= L2CAP_CR_NO_MEM
;
2922 /* Check for backlog size */
2923 if (sk_acceptq_is_full(parent
)) {
2924 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2928 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2932 write_lock_bh(&list
->lock
);
2934 /* Check if we already have channel with that dcid */
2935 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2936 write_unlock_bh(&list
->lock
);
2937 sock_set_flag(sk
, SOCK_ZAPPED
);
2938 l2cap_sock_kill(sk
);
2942 hci_conn_hold(conn
->hcon
);
2944 l2cap_sock_init(sk
, parent
);
2945 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2946 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2947 l2cap_pi(sk
)->psm
= psm
;
2948 l2cap_pi(sk
)->dcid
= scid
;
2950 __l2cap_chan_add(conn
, sk
, parent
);
2951 dcid
= l2cap_pi(sk
)->scid
;
2953 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2955 l2cap_pi(sk
)->ident
= cmd
->ident
;
2957 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2958 if (l2cap_check_security(sk
)) {
2959 if (bt_sk(sk
)->defer_setup
) {
2960 sk
->sk_state
= BT_CONNECT2
;
2961 result
= L2CAP_CR_PEND
;
2962 status
= L2CAP_CS_AUTHOR_PEND
;
2963 parent
->sk_data_ready(parent
, 0);
2965 sk
->sk_state
= BT_CONFIG
;
2966 result
= L2CAP_CR_SUCCESS
;
2967 status
= L2CAP_CS_NO_INFO
;
2970 sk
->sk_state
= BT_CONNECT2
;
2971 result
= L2CAP_CR_PEND
;
2972 status
= L2CAP_CS_AUTHEN_PEND
;
2975 sk
->sk_state
= BT_CONNECT2
;
2976 result
= L2CAP_CR_PEND
;
2977 status
= L2CAP_CS_NO_INFO
;
2980 write_unlock_bh(&list
->lock
);
2983 bh_unlock_sock(parent
);
2986 rsp
.scid
= cpu_to_le16(scid
);
2987 rsp
.dcid
= cpu_to_le16(dcid
);
2988 rsp
.result
= cpu_to_le16(result
);
2989 rsp
.status
= cpu_to_le16(status
);
2990 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2992 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2993 struct l2cap_info_req info
;
2994 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2996 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2997 conn
->info_ident
= l2cap_get_ident(conn
);
2999 mod_timer(&conn
->info_timer
, jiffies
+
3000 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
3002 l2cap_send_cmd(conn
, conn
->info_ident
,
3003 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3006 if (sk
&& !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) &&
3007 result
== L2CAP_CR_SUCCESS
) {
3009 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
3010 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3011 l2cap_build_conf_req(sk
, buf
), buf
);
3012 l2cap_pi(sk
)->num_conf_req
++;
3018 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3020 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3021 u16 scid
, dcid
, result
, status
;
3025 scid
= __le16_to_cpu(rsp
->scid
);
3026 dcid
= __le16_to_cpu(rsp
->dcid
);
3027 result
= __le16_to_cpu(rsp
->result
);
3028 status
= __le16_to_cpu(rsp
->status
);
3030 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
3033 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3037 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
3043 case L2CAP_CR_SUCCESS
:
3044 sk
->sk_state
= BT_CONFIG
;
3045 l2cap_pi(sk
)->ident
= 0;
3046 l2cap_pi(sk
)->dcid
= dcid
;
3047 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
3049 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
3052 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
3054 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3055 l2cap_build_conf_req(sk
, req
), req
);
3056 l2cap_pi(sk
)->num_conf_req
++;
3060 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3064 /* don't delete l2cap channel if sk is owned by user */
3065 if (sock_owned_by_user(sk
)) {
3066 sk
->sk_state
= BT_DISCONN
;
3067 l2cap_sock_clear_timer(sk
);
3068 l2cap_sock_set_timer(sk
, HZ
/ 5);
3072 l2cap_chan_del(sk
, ECONNREFUSED
);
3080 static inline void set_default_fcs(struct l2cap_pinfo
*pi
)
3082 /* FCS is enabled only in ERTM or streaming mode, if one or both
3085 if (pi
->mode
!= L2CAP_MODE_ERTM
&& pi
->mode
!= L2CAP_MODE_STREAMING
)
3086 pi
->fcs
= L2CAP_FCS_NONE
;
3087 else if (!(pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
3088 pi
->fcs
= L2CAP_FCS_CRC16
;
3091 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3093 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3099 dcid
= __le16_to_cpu(req
->dcid
);
3100 flags
= __le16_to_cpu(req
->flags
);
3102 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3104 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3108 if (sk
->sk_state
!= BT_CONFIG
) {
3109 struct l2cap_cmd_rej rej
;
3111 rej
.reason
= cpu_to_le16(0x0002);
3112 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3117 /* Reject if config buffer is too small. */
3118 len
= cmd_len
- sizeof(*req
);
3119 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
3120 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3121 l2cap_build_conf_rsp(sk
, rsp
,
3122 L2CAP_CONF_REJECT
, flags
), rsp
);
3127 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
3128 l2cap_pi(sk
)->conf_len
+= len
;
3130 if (flags
& 0x0001) {
3131 /* Incomplete config. Send empty response. */
3132 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3133 l2cap_build_conf_rsp(sk
, rsp
,
3134 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3138 /* Complete config. */
3139 len
= l2cap_parse_conf_req(sk
, rsp
);
3141 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3145 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3146 l2cap_pi(sk
)->num_conf_rsp
++;
3148 /* Reset config buffer. */
3149 l2cap_pi(sk
)->conf_len
= 0;
3151 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
3154 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
3155 set_default_fcs(l2cap_pi(sk
));
3157 sk
->sk_state
= BT_CONNECTED
;
3159 l2cap_pi(sk
)->next_tx_seq
= 0;
3160 l2cap_pi(sk
)->expected_tx_seq
= 0;
3161 __skb_queue_head_init(TX_QUEUE(sk
));
3162 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3163 l2cap_ertm_init(sk
);
3165 l2cap_chan_ready(sk
);
3169 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
3171 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
3172 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3173 l2cap_build_conf_req(sk
, buf
), buf
);
3174 l2cap_pi(sk
)->num_conf_req
++;
3182 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3184 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3185 u16 scid
, flags
, result
;
3187 int len
= cmd
->len
- sizeof(*rsp
);
3189 scid
= __le16_to_cpu(rsp
->scid
);
3190 flags
= __le16_to_cpu(rsp
->flags
);
3191 result
= __le16_to_cpu(rsp
->result
);
3193 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3194 scid
, flags
, result
);
3196 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3201 case L2CAP_CONF_SUCCESS
:
3202 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
3205 case L2CAP_CONF_UNACCEPT
:
3206 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3209 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3210 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3214 /* throw out any old stored conf requests */
3215 result
= L2CAP_CONF_SUCCESS
;
3216 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
3219 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3223 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3224 L2CAP_CONF_REQ
, len
, req
);
3225 l2cap_pi(sk
)->num_conf_req
++;
3226 if (result
!= L2CAP_CONF_SUCCESS
)
3232 sk
->sk_err
= ECONNRESET
;
3233 l2cap_sock_set_timer(sk
, HZ
* 5);
3234 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3241 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
3243 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
3244 set_default_fcs(l2cap_pi(sk
));
3246 sk
->sk_state
= BT_CONNECTED
;
3247 l2cap_pi(sk
)->next_tx_seq
= 0;
3248 l2cap_pi(sk
)->expected_tx_seq
= 0;
3249 __skb_queue_head_init(TX_QUEUE(sk
));
3250 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3251 l2cap_ertm_init(sk
);
3253 l2cap_chan_ready(sk
);
3261 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3263 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3264 struct l2cap_disconn_rsp rsp
;
3268 scid
= __le16_to_cpu(req
->scid
);
3269 dcid
= __le16_to_cpu(req
->dcid
);
3271 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3273 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3277 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3278 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3279 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3281 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3283 /* don't delete l2cap channel if sk is owned by user */
3284 if (sock_owned_by_user(sk
)) {
3285 sk
->sk_state
= BT_DISCONN
;
3286 l2cap_sock_clear_timer(sk
);
3287 l2cap_sock_set_timer(sk
, HZ
/ 5);
3292 l2cap_chan_del(sk
, ECONNRESET
);
3295 l2cap_sock_kill(sk
);
3299 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3301 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3305 scid
= __le16_to_cpu(rsp
->scid
);
3306 dcid
= __le16_to_cpu(rsp
->dcid
);
3308 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3310 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3314 /* don't delete l2cap channel if sk is owned by user */
3315 if (sock_owned_by_user(sk
)) {
3316 sk
->sk_state
= BT_DISCONN
;
3317 l2cap_sock_clear_timer(sk
);
3318 l2cap_sock_set_timer(sk
, HZ
/ 5);
3323 l2cap_chan_del(sk
, 0);
3326 l2cap_sock_kill(sk
);
3330 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3332 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3335 type
= __le16_to_cpu(req
->type
);
3337 BT_DBG("type 0x%4.4x", type
);
3339 if (type
== L2CAP_IT_FEAT_MASK
) {
3341 u32 feat_mask
= l2cap_feat_mask
;
3342 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3343 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3344 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3346 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3348 put_unaligned_le32(feat_mask
, rsp
->data
);
3349 l2cap_send_cmd(conn
, cmd
->ident
,
3350 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3351 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3353 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3354 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3355 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3356 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3357 l2cap_send_cmd(conn
, cmd
->ident
,
3358 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3360 struct l2cap_info_rsp rsp
;
3361 rsp
.type
= cpu_to_le16(type
);
3362 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3363 l2cap_send_cmd(conn
, cmd
->ident
,
3364 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3370 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3372 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3375 type
= __le16_to_cpu(rsp
->type
);
3376 result
= __le16_to_cpu(rsp
->result
);
3378 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3380 del_timer(&conn
->info_timer
);
3382 if (result
!= L2CAP_IR_SUCCESS
) {
3383 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3384 conn
->info_ident
= 0;
3386 l2cap_conn_start(conn
);
3391 if (type
== L2CAP_IT_FEAT_MASK
) {
3392 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3394 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3395 struct l2cap_info_req req
;
3396 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3398 conn
->info_ident
= l2cap_get_ident(conn
);
3400 l2cap_send_cmd(conn
, conn
->info_ident
,
3401 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3403 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3404 conn
->info_ident
= 0;
3406 l2cap_conn_start(conn
);
3408 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3409 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3410 conn
->info_ident
= 0;
3412 l2cap_conn_start(conn
);
3418 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3420 u8
*data
= skb
->data
;
3422 struct l2cap_cmd_hdr cmd
;
3425 l2cap_raw_recv(conn
, skb
);
3427 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3429 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3430 data
+= L2CAP_CMD_HDR_SIZE
;
3431 len
-= L2CAP_CMD_HDR_SIZE
;
3433 cmd_len
= le16_to_cpu(cmd
.len
);
3435 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3437 if (cmd_len
> len
|| !cmd
.ident
) {
3438 BT_DBG("corrupted command");
3443 case L2CAP_COMMAND_REJ
:
3444 l2cap_command_rej(conn
, &cmd
, data
);
3447 case L2CAP_CONN_REQ
:
3448 err
= l2cap_connect_req(conn
, &cmd
, data
);
3451 case L2CAP_CONN_RSP
:
3452 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3455 case L2CAP_CONF_REQ
:
3456 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3459 case L2CAP_CONF_RSP
:
3460 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3463 case L2CAP_DISCONN_REQ
:
3464 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3467 case L2CAP_DISCONN_RSP
:
3468 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3471 case L2CAP_ECHO_REQ
:
3472 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3475 case L2CAP_ECHO_RSP
:
3478 case L2CAP_INFO_REQ
:
3479 err
= l2cap_information_req(conn
, &cmd
, data
);
3482 case L2CAP_INFO_RSP
:
3483 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3487 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3493 struct l2cap_cmd_rej rej
;
3494 BT_DBG("error %d", err
);
3496 /* FIXME: Map err to a valid reason */
3497 rej
.reason
= cpu_to_le16(0);
3498 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3508 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3510 u16 our_fcs
, rcv_fcs
;
3511 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3513 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3514 skb_trim(skb
, skb
->len
- 2);
3515 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3516 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3518 if (our_fcs
!= rcv_fcs
)
3524 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3526 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3529 pi
->frames_sent
= 0;
3531 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3533 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3534 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3535 l2cap_send_sframe(pi
, control
);
3536 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3539 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3540 l2cap_retransmit_frames(sk
);
3542 l2cap_ertm_send(sk
);
3544 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3545 pi
->frames_sent
== 0) {
3546 control
|= L2CAP_SUPER_RCV_READY
;
3547 l2cap_send_sframe(pi
, control
);
3551 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3553 struct sk_buff
*next_skb
;
3554 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3555 int tx_seq_offset
, next_tx_seq_offset
;
3557 bt_cb(skb
)->tx_seq
= tx_seq
;
3558 bt_cb(skb
)->sar
= sar
;
3560 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3562 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3566 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3567 if (tx_seq_offset
< 0)
3568 tx_seq_offset
+= 64;
3571 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3574 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3575 pi
->buffer_seq
) % 64;
3576 if (next_tx_seq_offset
< 0)
3577 next_tx_seq_offset
+= 64;
3579 if (next_tx_seq_offset
> tx_seq_offset
) {
3580 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3584 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3587 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3589 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3594 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3596 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3597 struct sk_buff
*_skb
;
3600 switch (control
& L2CAP_CTRL_SAR
) {
3601 case L2CAP_SDU_UNSEGMENTED
:
3602 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3605 err
= sock_queue_rcv_skb(sk
, skb
);
3611 case L2CAP_SDU_START
:
3612 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3615 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3617 if (pi
->sdu_len
> pi
->imtu
)
3620 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3624 /* pull sdu_len bytes only after alloc, because of Local Busy
3625 * condition we have to be sure that this will be executed
3626 * only once, i.e., when alloc does not fail */
3629 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3631 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3632 pi
->partial_sdu_len
= skb
->len
;
3635 case L2CAP_SDU_CONTINUE
:
3636 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3642 pi
->partial_sdu_len
+= skb
->len
;
3643 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3646 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3651 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3657 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3658 pi
->partial_sdu_len
+= skb
->len
;
3660 if (pi
->partial_sdu_len
> pi
->imtu
)
3663 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3666 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3669 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3671 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3675 err
= sock_queue_rcv_skb(sk
, _skb
);
3678 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3682 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3683 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3697 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3702 static int l2cap_try_push_rx_skb(struct sock
*sk
)
3704 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3705 struct sk_buff
*skb
;
3709 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3710 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3711 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3713 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3717 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3720 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3723 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3724 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3725 l2cap_send_sframe(pi
, control
);
3726 l2cap_pi(sk
)->retry_count
= 1;
3728 del_timer(&pi
->retrans_timer
);
3729 __mod_monitor_timer();
3731 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3734 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3735 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3737 BT_DBG("sk %p, Exit local busy", sk
);
3742 static void l2cap_busy_work(struct work_struct
*work
)
3744 DECLARE_WAITQUEUE(wait
, current
);
3745 struct l2cap_pinfo
*pi
=
3746 container_of(work
, struct l2cap_pinfo
, busy_work
);
3747 struct sock
*sk
= (struct sock
*)pi
;
3748 int n_tries
= 0, timeo
= HZ
/5, err
;
3749 struct sk_buff
*skb
;
3753 add_wait_queue(sk_sleep(sk
), &wait
);
3754 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3755 set_current_state(TASK_INTERRUPTIBLE
);
3757 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3759 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3766 if (signal_pending(current
)) {
3767 err
= sock_intr_errno(timeo
);
3772 timeo
= schedule_timeout(timeo
);
3775 err
= sock_error(sk
);
3779 if (l2cap_try_push_rx_skb(sk
) == 0)
3783 set_current_state(TASK_RUNNING
);
3784 remove_wait_queue(sk_sleep(sk
), &wait
);
3789 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3791 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3794 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3795 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3796 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3797 return l2cap_try_push_rx_skb(sk
);
3802 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3804 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3808 /* Busy Condition */
3809 BT_DBG("sk %p, Enter local busy", sk
);
3811 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3812 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3813 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3815 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3816 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3817 l2cap_send_sframe(pi
, sctrl
);
3819 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3821 del_timer(&pi
->ack_timer
);
3823 queue_work(_busy_wq
, &pi
->busy_work
);
3828 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3830 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3831 struct sk_buff
*_skb
;
3835 * TODO: We have to notify the userland if some data is lost with the
3839 switch (control
& L2CAP_CTRL_SAR
) {
3840 case L2CAP_SDU_UNSEGMENTED
:
3841 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3846 err
= sock_queue_rcv_skb(sk
, skb
);
3852 case L2CAP_SDU_START
:
3853 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3858 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3861 if (pi
->sdu_len
> pi
->imtu
) {
3866 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3872 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3874 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3875 pi
->partial_sdu_len
= skb
->len
;
3879 case L2CAP_SDU_CONTINUE
:
3880 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3883 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3885 pi
->partial_sdu_len
+= skb
->len
;
3886 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3894 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3897 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3899 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3900 pi
->partial_sdu_len
+= skb
->len
;
3902 if (pi
->partial_sdu_len
> pi
->imtu
)
3905 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3906 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3907 err
= sock_queue_rcv_skb(sk
, _skb
);
3922 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3924 struct sk_buff
*skb
;
3927 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3928 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3931 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3932 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3933 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3934 l2cap_pi(sk
)->buffer_seq_srej
=
3935 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3936 tx_seq
= (tx_seq
+ 1) % 64;
3940 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3942 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3943 struct srej_list
*l
, *tmp
;
3946 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3947 if (l
->tx_seq
== tx_seq
) {
3952 control
= L2CAP_SUPER_SELECT_REJECT
;
3953 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3954 l2cap_send_sframe(pi
, control
);
3956 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3960 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3962 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3963 struct srej_list
*new;
3966 while (tx_seq
!= pi
->expected_tx_seq
) {
3967 control
= L2CAP_SUPER_SELECT_REJECT
;
3968 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3969 l2cap_send_sframe(pi
, control
);
3971 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3972 new->tx_seq
= pi
->expected_tx_seq
;
3973 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3974 list_add_tail(&new->list
, SREJ_LIST(sk
));
3976 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3979 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3981 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3982 u8 tx_seq
= __get_txseq(rx_control
);
3983 u8 req_seq
= __get_reqseq(rx_control
);
3984 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3985 int tx_seq_offset
, expected_tx_seq_offset
;
3986 int num_to_ack
= (pi
->tx_win
/6) + 1;
3989 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3992 if (L2CAP_CTRL_FINAL
& rx_control
&&
3993 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3994 del_timer(&pi
->monitor_timer
);
3995 if (pi
->unacked_frames
> 0)
3996 __mod_retrans_timer();
3997 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4000 pi
->expected_ack_seq
= req_seq
;
4001 l2cap_drop_acked_frames(sk
);
4003 if (tx_seq
== pi
->expected_tx_seq
)
4006 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
4007 if (tx_seq_offset
< 0)
4008 tx_seq_offset
+= 64;
4010 /* invalid tx_seq */
4011 if (tx_seq_offset
>= pi
->tx_win
) {
4012 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4016 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
4019 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4020 struct srej_list
*first
;
4022 first
= list_first_entry(SREJ_LIST(sk
),
4023 struct srej_list
, list
);
4024 if (tx_seq
== first
->tx_seq
) {
4025 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
4026 l2cap_check_srej_gap(sk
, tx_seq
);
4028 list_del(&first
->list
);
4031 if (list_empty(SREJ_LIST(sk
))) {
4032 pi
->buffer_seq
= pi
->buffer_seq_srej
;
4033 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
4035 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
4038 struct srej_list
*l
;
4040 /* duplicated tx_seq */
4041 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
4044 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
4045 if (l
->tx_seq
== tx_seq
) {
4046 l2cap_resend_srejframe(sk
, tx_seq
);
4050 l2cap_send_srejframe(sk
, tx_seq
);
4053 expected_tx_seq_offset
=
4054 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
4055 if (expected_tx_seq_offset
< 0)
4056 expected_tx_seq_offset
+= 64;
4058 /* duplicated tx_seq */
4059 if (tx_seq_offset
< expected_tx_seq_offset
)
4062 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
4064 BT_DBG("sk %p, Enter SREJ", sk
);
4066 INIT_LIST_HEAD(SREJ_LIST(sk
));
4067 pi
->buffer_seq_srej
= pi
->buffer_seq
;
4069 __skb_queue_head_init(SREJ_QUEUE(sk
));
4070 __skb_queue_head_init(BUSY_QUEUE(sk
));
4071 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
4073 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
4075 l2cap_send_srejframe(sk
, tx_seq
);
4077 del_timer(&pi
->ack_timer
);
4082 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4084 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4085 bt_cb(skb
)->tx_seq
= tx_seq
;
4086 bt_cb(skb
)->sar
= sar
;
4087 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
4091 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
4095 if (rx_control
& L2CAP_CTRL_FINAL
) {
4096 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4097 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4099 l2cap_retransmit_frames(sk
);
4104 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
4105 if (pi
->num_acked
== num_to_ack
- 1)
4115 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
4117 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4119 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
4122 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
4123 l2cap_drop_acked_frames(sk
);
4125 if (rx_control
& L2CAP_CTRL_POLL
) {
4126 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4127 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4128 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4129 (pi
->unacked_frames
> 0))
4130 __mod_retrans_timer();
4132 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4133 l2cap_send_srejtail(sk
);
4135 l2cap_send_i_or_rr_or_rnr(sk
);
4138 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4139 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4141 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4142 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4144 l2cap_retransmit_frames(sk
);
4147 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4148 (pi
->unacked_frames
> 0))
4149 __mod_retrans_timer();
4151 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4152 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)
4155 l2cap_ertm_send(sk
);
4159 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
4161 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4162 u8 tx_seq
= __get_reqseq(rx_control
);
4164 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4166 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4168 pi
->expected_ack_seq
= tx_seq
;
4169 l2cap_drop_acked_frames(sk
);
4171 if (rx_control
& L2CAP_CTRL_FINAL
) {
4172 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4173 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4175 l2cap_retransmit_frames(sk
);
4177 l2cap_retransmit_frames(sk
);
4179 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
4180 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
4183 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
4185 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4186 u8 tx_seq
= __get_reqseq(rx_control
);
4188 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4190 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4192 if (rx_control
& L2CAP_CTRL_POLL
) {
4193 pi
->expected_ack_seq
= tx_seq
;
4194 l2cap_drop_acked_frames(sk
);
4196 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4197 l2cap_retransmit_one_frame(sk
, tx_seq
);
4199 l2cap_ertm_send(sk
);
4201 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4202 pi
->srej_save_reqseq
= tx_seq
;
4203 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4205 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4206 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
4207 pi
->srej_save_reqseq
== tx_seq
)
4208 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
4210 l2cap_retransmit_one_frame(sk
, tx_seq
);
4212 l2cap_retransmit_one_frame(sk
, tx_seq
);
4213 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4214 pi
->srej_save_reqseq
= tx_seq
;
4215 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4220 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
4222 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4223 u8 tx_seq
= __get_reqseq(rx_control
);
4225 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4227 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
4228 pi
->expected_ack_seq
= tx_seq
;
4229 l2cap_drop_acked_frames(sk
);
4231 if (rx_control
& L2CAP_CTRL_POLL
)
4232 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4234 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
4235 del_timer(&pi
->retrans_timer
);
4236 if (rx_control
& L2CAP_CTRL_POLL
)
4237 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
4241 if (rx_control
& L2CAP_CTRL_POLL
)
4242 l2cap_send_srejtail(sk
);
4244 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
4247 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
4249 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4251 if (L2CAP_CTRL_FINAL
& rx_control
&&
4252 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4253 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4254 if (l2cap_pi(sk
)->unacked_frames
> 0)
4255 __mod_retrans_timer();
4256 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4259 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4260 case L2CAP_SUPER_RCV_READY
:
4261 l2cap_data_channel_rrframe(sk
, rx_control
);
4264 case L2CAP_SUPER_REJECT
:
4265 l2cap_data_channel_rejframe(sk
, rx_control
);
4268 case L2CAP_SUPER_SELECT_REJECT
:
4269 l2cap_data_channel_srejframe(sk
, rx_control
);
4272 case L2CAP_SUPER_RCV_NOT_READY
:
4273 l2cap_data_channel_rnrframe(sk
, rx_control
);
4281 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
4283 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4286 int len
, next_tx_seq_offset
, req_seq_offset
;
4288 control
= get_unaligned_le16(skb
->data
);
4293 * We can just drop the corrupted I-frame here.
4294 * Receiver will miss it and start proper recovery
4295 * procedures and ask retransmission.
4297 if (l2cap_check_fcs(pi
, skb
))
4300 if (__is_sar_start(control
) && __is_iframe(control
))
4303 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4306 if (len
> pi
->mps
) {
4307 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4311 req_seq
= __get_reqseq(control
);
4312 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4313 if (req_seq_offset
< 0)
4314 req_seq_offset
+= 64;
4316 next_tx_seq_offset
=
4317 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4318 if (next_tx_seq_offset
< 0)
4319 next_tx_seq_offset
+= 64;
4321 /* check for invalid req-seq */
4322 if (req_seq_offset
> next_tx_seq_offset
) {
4323 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4327 if (__is_iframe(control
)) {
4329 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4333 l2cap_data_channel_iframe(sk
, control
, skb
);
4337 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4341 l2cap_data_channel_sframe(sk
, control
, skb
);
4351 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4354 struct l2cap_pinfo
*pi
;
4359 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4361 BT_DBG("unknown cid 0x%4.4x", cid
);
4367 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4369 if (sk
->sk_state
!= BT_CONNECTED
)
4373 case L2CAP_MODE_BASIC
:
4374 /* If socket recv buffers overflows we drop data here
4375 * which is *bad* because L2CAP has to be reliable.
4376 * But we don't have any other choice. L2CAP doesn't
4377 * provide flow control mechanism. */
4379 if (pi
->imtu
< skb
->len
)
4382 if (!sock_queue_rcv_skb(sk
, skb
))
4386 case L2CAP_MODE_ERTM
:
4387 if (!sock_owned_by_user(sk
)) {
4388 l2cap_ertm_data_rcv(sk
, skb
);
4390 if (sk_add_backlog(sk
, skb
))
4396 case L2CAP_MODE_STREAMING
:
4397 control
= get_unaligned_le16(skb
->data
);
4401 if (l2cap_check_fcs(pi
, skb
))
4404 if (__is_sar_start(control
))
4407 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4410 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
4413 tx_seq
= __get_txseq(control
);
4415 if (pi
->expected_tx_seq
== tx_seq
)
4416 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4418 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4420 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4425 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4439 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4443 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4449 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4451 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4454 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4457 if (!sock_queue_rcv_skb(sk
, skb
))
4469 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4471 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4475 skb_pull(skb
, L2CAP_HDR_SIZE
);
4476 cid
= __le16_to_cpu(lh
->cid
);
4477 len
= __le16_to_cpu(lh
->len
);
4479 if (len
!= skb
->len
) {
4484 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4487 case L2CAP_CID_SIGNALING
:
4488 l2cap_sig_channel(conn
, skb
);
4491 case L2CAP_CID_CONN_LESS
:
4492 psm
= get_unaligned_le16(skb
->data
);
4494 l2cap_conless_channel(conn
, psm
, skb
);
4498 l2cap_data_channel(conn
, cid
, skb
);
4503 /* ---- L2CAP interface with lower layer (HCI) ---- */
4505 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4507 int exact
= 0, lm1
= 0, lm2
= 0;
4508 register struct sock
*sk
;
4509 struct hlist_node
*node
;
4511 if (type
!= ACL_LINK
)
4514 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4516 /* Find listening sockets and check their link_mode */
4517 read_lock(&l2cap_sk_list
.lock
);
4518 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4519 if (sk
->sk_state
!= BT_LISTEN
)
4522 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4523 lm1
|= HCI_LM_ACCEPT
;
4524 if (l2cap_pi(sk
)->role_switch
)
4525 lm1
|= HCI_LM_MASTER
;
4527 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4528 lm2
|= HCI_LM_ACCEPT
;
4529 if (l2cap_pi(sk
)->role_switch
)
4530 lm2
|= HCI_LM_MASTER
;
4533 read_unlock(&l2cap_sk_list
.lock
);
4535 return exact
? lm1
: lm2
;
4538 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4540 struct l2cap_conn
*conn
;
4542 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4544 if (hcon
->type
!= ACL_LINK
)
4548 conn
= l2cap_conn_add(hcon
, status
);
4550 l2cap_conn_ready(conn
);
4552 l2cap_conn_del(hcon
, bt_err(status
));
4557 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4559 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4561 BT_DBG("hcon %p", hcon
);
4563 if (hcon
->type
!= ACL_LINK
|| !conn
)
4566 return conn
->disc_reason
;
4569 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4571 BT_DBG("hcon %p reason %d", hcon
, reason
);
4573 if (hcon
->type
!= ACL_LINK
)
4576 l2cap_conn_del(hcon
, bt_err(reason
));
4581 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4583 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4586 if (encrypt
== 0x00) {
4587 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4588 l2cap_sock_clear_timer(sk
);
4589 l2cap_sock_set_timer(sk
, HZ
* 5);
4590 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4591 __l2cap_sock_close(sk
, ECONNREFUSED
);
4593 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4594 l2cap_sock_clear_timer(sk
);
4598 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4600 struct l2cap_chan_list
*l
;
4601 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4607 l
= &conn
->chan_list
;
4609 BT_DBG("conn %p", conn
);
4611 read_lock(&l
->lock
);
4613 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4616 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4621 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4622 sk
->sk_state
== BT_CONFIG
)) {
4623 l2cap_check_encryption(sk
, encrypt
);
4628 if (sk
->sk_state
== BT_CONNECT
) {
4630 struct l2cap_conn_req req
;
4631 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4632 req
.psm
= l2cap_pi(sk
)->psm
;
4634 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4635 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4637 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4638 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4640 l2cap_sock_clear_timer(sk
);
4641 l2cap_sock_set_timer(sk
, HZ
/ 10);
4643 } else if (sk
->sk_state
== BT_CONNECT2
) {
4644 struct l2cap_conn_rsp rsp
;
4648 sk
->sk_state
= BT_CONFIG
;
4649 result
= L2CAP_CR_SUCCESS
;
4651 sk
->sk_state
= BT_DISCONN
;
4652 l2cap_sock_set_timer(sk
, HZ
/ 10);
4653 result
= L2CAP_CR_SEC_BLOCK
;
4656 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4657 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4658 rsp
.result
= cpu_to_le16(result
);
4659 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4660 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4661 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4667 read_unlock(&l
->lock
);
4672 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4674 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4676 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
4679 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4681 if (flags
& ACL_START
) {
4682 struct l2cap_hdr
*hdr
;
4688 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4689 kfree_skb(conn
->rx_skb
);
4690 conn
->rx_skb
= NULL
;
4692 l2cap_conn_unreliable(conn
, ECOMM
);
4695 /* Start fragment always begin with Basic L2CAP header */
4696 if (skb
->len
< L2CAP_HDR_SIZE
) {
4697 BT_ERR("Frame is too short (len %d)", skb
->len
);
4698 l2cap_conn_unreliable(conn
, ECOMM
);
4702 hdr
= (struct l2cap_hdr
*) skb
->data
;
4703 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4704 cid
= __le16_to_cpu(hdr
->cid
);
4706 if (len
== skb
->len
) {
4707 /* Complete frame received */
4708 l2cap_recv_frame(conn
, skb
);
4712 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4714 if (skb
->len
> len
) {
4715 BT_ERR("Frame is too long (len %d, expected len %d)",
4717 l2cap_conn_unreliable(conn
, ECOMM
);
4721 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4723 if (sk
&& l2cap_pi(sk
)->imtu
< len
- L2CAP_HDR_SIZE
) {
4724 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
4725 len
, l2cap_pi(sk
)->imtu
);
4727 l2cap_conn_unreliable(conn
, ECOMM
);
4734 /* Allocate skb for the complete frame (with header) */
4735 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4739 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4741 conn
->rx_len
= len
- skb
->len
;
4743 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4745 if (!conn
->rx_len
) {
4746 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4747 l2cap_conn_unreliable(conn
, ECOMM
);
4751 if (skb
->len
> conn
->rx_len
) {
4752 BT_ERR("Fragment is too long (len %d, expected %d)",
4753 skb
->len
, conn
->rx_len
);
4754 kfree_skb(conn
->rx_skb
);
4755 conn
->rx_skb
= NULL
;
4757 l2cap_conn_unreliable(conn
, ECOMM
);
4761 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4763 conn
->rx_len
-= skb
->len
;
4765 if (!conn
->rx_len
) {
4766 /* Complete frame received */
4767 l2cap_recv_frame(conn
, conn
->rx_skb
);
4768 conn
->rx_skb
= NULL
;
4777 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4780 struct hlist_node
*node
;
4782 read_lock_bh(&l2cap_sk_list
.lock
);
4784 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4785 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4787 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4788 batostr(&bt_sk(sk
)->src
),
4789 batostr(&bt_sk(sk
)->dst
),
4790 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4792 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4795 read_unlock_bh(&l2cap_sk_list
.lock
);
4800 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4802 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4805 static const struct file_operations l2cap_debugfs_fops
= {
4806 .open
= l2cap_debugfs_open
,
4808 .llseek
= seq_lseek
,
4809 .release
= single_release
,
4812 static struct dentry
*l2cap_debugfs
;
4814 static const struct proto_ops l2cap_sock_ops
= {
4815 .family
= PF_BLUETOOTH
,
4816 .owner
= THIS_MODULE
,
4817 .release
= l2cap_sock_release
,
4818 .bind
= l2cap_sock_bind
,
4819 .connect
= l2cap_sock_connect
,
4820 .listen
= l2cap_sock_listen
,
4821 .accept
= l2cap_sock_accept
,
4822 .getname
= l2cap_sock_getname
,
4823 .sendmsg
= l2cap_sock_sendmsg
,
4824 .recvmsg
= l2cap_sock_recvmsg
,
4825 .poll
= bt_sock_poll
,
4826 .ioctl
= bt_sock_ioctl
,
4827 .mmap
= sock_no_mmap
,
4828 .socketpair
= sock_no_socketpair
,
4829 .shutdown
= l2cap_sock_shutdown
,
4830 .setsockopt
= l2cap_sock_setsockopt
,
4831 .getsockopt
= l2cap_sock_getsockopt
4834 static const struct net_proto_family l2cap_sock_family_ops
= {
4835 .family
= PF_BLUETOOTH
,
4836 .owner
= THIS_MODULE
,
4837 .create
= l2cap_sock_create
,
4840 static struct hci_proto l2cap_hci_proto
= {
4842 .id
= HCI_PROTO_L2CAP
,
4843 .connect_ind
= l2cap_connect_ind
,
4844 .connect_cfm
= l2cap_connect_cfm
,
4845 .disconn_ind
= l2cap_disconn_ind
,
4846 .disconn_cfm
= l2cap_disconn_cfm
,
4847 .security_cfm
= l2cap_security_cfm
,
4848 .recv_acldata
= l2cap_recv_acldata
4851 static int __init
l2cap_init(void)
4855 err
= proto_register(&l2cap_proto
, 0);
4859 _busy_wq
= create_singlethread_workqueue("l2cap");
4861 proto_unregister(&l2cap_proto
);
4865 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4867 BT_ERR("L2CAP socket registration failed");
4871 err
= hci_register_proto(&l2cap_hci_proto
);
4873 BT_ERR("L2CAP protocol registration failed");
4874 bt_sock_unregister(BTPROTO_L2CAP
);
4879 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4880 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4882 BT_ERR("Failed to create L2CAP debug file");
4885 BT_INFO("L2CAP ver %s", VERSION
);
4886 BT_INFO("L2CAP socket layer initialized");
4891 destroy_workqueue(_busy_wq
);
4892 proto_unregister(&l2cap_proto
);
4896 static void __exit
l2cap_exit(void)
4898 debugfs_remove(l2cap_debugfs
);
4900 flush_workqueue(_busy_wq
);
4901 destroy_workqueue(_busy_wq
);
4903 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4904 BT_ERR("L2CAP socket unregistration failed");
4906 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4907 BT_ERR("L2CAP protocol unregistration failed");
4909 proto_unregister(&l2cap_proto
);
4912 void l2cap_load(void)
4914 /* Dummy function to trigger automatic L2CAP module loading by
4915 * other modules that use L2CAP sockets but don't use any other
4916 * symbols from it. */
4918 EXPORT_SYMBOL(l2cap_load
);
4920 module_init(l2cap_init
);
4921 module_exit(l2cap_exit
);
4923 module_param(disable_ertm
, bool, 0644);
4924 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");
4926 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4927 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4928 MODULE_VERSION(VERSION
);
4929 MODULE_LICENSE("GPL");
4930 MODULE_ALIAS("bt-proto-0");