/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>

#include <asm/system.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
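
/*
 * Reference counting note: l2cap_chan_create() below initialises
 * chan->refcnt to 1; chan_hold() and chan_put() take and drop additional
 * references on the channel.
 */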
/* ---- L2CAP channels ---- */

static inline void chan_hold(struct l2cap_chan *c)
{
	atomic_inc(&c->refcnt);
}

static inline void chan_put(struct l2cap_chan *c)
{
	if (atomic_dec_and_test(&c->refcnt))
		kfree(c);
}
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked socket */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock_bh(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	/* ... */

	for (p = 0x1001; p < 0x1100; p += 2)
		if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
			chan->psm   = cpu_to_le16(p);
			chan->sport = cpu_to_le16(p);
			err = 0;
			break;
		}

done:
	write_unlock_bh(&chan_list_lock);
	return err;
}

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock_bh(&chan_list_lock);

	chan->scid = scid;

	write_unlock_bh(&chan_list_lock);

	return 0;
}
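
/* l2cap_alloc_cid() hands out the first source CID in the dynamic range
 * (L2CAP_CID_DYN_START..L2CAP_CID_DYN_END) that is not already in use on
 * this connection. */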
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid = L2CAP_CID_DYN_START;

	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}
	/* ... */
}

static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
{
	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);

	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
		chan_hold(chan);
}

static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
{
	BT_DBG("chan %p state %d", chan, chan->state);

	if (timer_pending(timer) && del_timer(timer))
		chan_put(chan);
}

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
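
/* chan_timer callback: if the socket is currently locked by userspace the
 * timer is simply re-armed; otherwise the channel is closed with a reason
 * derived from its current state. */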
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		/* ... */
	}

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
			chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	/* ... */

	chan->ops->close(chan->data);
}
struct l2cap_chan *l2cap_chan_create(struct sock *sk)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	/* ... */

	write_lock_bh(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock_bh(&chan_list_lock);

	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* ... */
}

void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock_bh(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock_bh(&chan_list_lock);
}
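
/* __l2cap_chan_add() attaches a channel to a connection and assigns its CIDs:
 * LE data channels use the fixed LE data CID, connectionless channels the
 * connectionless CID, raw channels the signalling CID, and connection-oriented
 * ACL channels get a dynamic CID from l2cap_alloc_cid(). */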
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			chan->psm, chan->dcid);

	conn->disc_reason = 0x13;

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
		if (conn->hcon->type == LE_LINK) {
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	list_add(&chan->list, &conn->chan_l);
}
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* ... */
		hci_conn_put(conn->hcon);
	}

	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	/* ... */

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			/* ... */
		}
	}
}
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		__clear_chan_timer(chan);
		bh_lock_sock(sk);
		l2cap_chan_close(chan, ECONNRESET);
		bh_unlock_sock(sk);
		chan->ops->close(chan->data);
	}
}
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);

	switch (chan->state) {
	case BT_LISTEN:
		l2cap_chan_cleanup_listen(sk);

		l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__clear_chan_timer(chan);
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
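
/* Map the channel security level to the HCI authentication requirement used
 * when securing the ACL link: raw channels ask for dedicated bonding, the SDP
 * PSM (0x0001) for no bonding, everything else for general bonding, with MITM
 * protection added for BT_SECURITY_HIGH. */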
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == cpu_to_le16(0x0001)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}

/* Service level security */
static inline int l2cap_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;

	hci_send_acl(conn->hcon, skb, flags);
}
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;
	u8 flags;

	if (chan->state != BT_CONNECTED)
		return;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= L2CAP_CTRL_FINAL;

	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= L2CAP_CTRL_POLL;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;

	hci_send_acl(chan->conn->hcon, skb, flags);
}
static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
{
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(chan, control);
}

static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm  = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;

	local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct l2cap_disconn_req req;

	/* ... */

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	l2cap_state_change(chan, BT_DISCONN);
	/* ... */
}
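
/* Once the information request exchange has finished, walk every channel on
 * the connection: channels in BT_CONNECT send their Connection Request, and
 * channels in BT_CONNECT2 send the pending Connection Response plus, on
 * success, a first Configuration Request. */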
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* ... */
			continue;
		}

		if (chan->state == BT_CONNECT) {
			struct l2cap_conn_req req;

			if (!l2cap_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				/* ... */
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				/* l2cap_chan_close() calls list_del(chan)
				 * so release the lock */
				read_unlock(&conn->chan_lock);
				l2cap_chan_close(chan, ECONNRESET);
				read_lock(&conn->chan_lock);
				continue;
			}

			req.scid = cpu_to_le16(chan->scid);
			/* ... */

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_check_security(chan)) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				/* ... */
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}

	read_unlock(&conn->chan_lock);
}
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src)) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	/* ... */

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	/* ... */

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

clean:
	bh_unlock_sock(parent);
}
static void l2cap_chan_ready(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	chan->conf_state = 0;
	__clear_chan_timer(chan);

	l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(sk);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);
	}

	read_unlock(&conn->chan_lock);
}
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		if (chan->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&conn->chan_lock);
}
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* ... */
		l2cap_chan_del(chan, err);
		/* ... */
		chan->ops->close(chan->data);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
		del_timer(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
static void security_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	if (hcon->type == LE_LINK)
		setup_timer(&conn->security_timer, security_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	conn->disc_reason = 0x13;

	return conn;
}
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
/* ---- Socket interface ---- */

/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src)) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	/* ... */

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	return err;
}
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* ... */

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* ... */
		timeo = schedule_timeout(timeo);
		/* ... */
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}

static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
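
/* Drop I-frames from the transmit queue that the peer has acknowledged, i.e.
 * everything ahead of expected_ack_seq, and stop the retransmission timer
 * once no unacked frames remain. */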
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&chan->tx_q)) &&
			chan->unacked_frames) {
		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
			break;

		skb = skb_dequeue(&chan->tx_q);
		kfree_skb(skb);

		chan->unacked_frames--;
	}

	if (!chan->unacked_frames)
		__clear_retrans_timer(chan);
}
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);

	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;
	hci_send_acl(hcon, skb, flags);
}
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control, fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}
static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(&chan->tx_q);
	if (!skb)
		return;

	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(&chan->tx_q, skb))
			return;

	} while ((skb = skb_queue_next(&chan->tx_q, skb)));

	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		return;
	}

	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control &= L2CAP_CTRL_SAR;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= L2CAP_CTRL_FINAL;

	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (chan->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(chan, tx_skb);
}
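
/* Main ERTM transmit loop: clone the frame at tx_send_head, stamp it with the
 * current ReqSeq/TxSeq (plus the F-bit if one is pending), append the CRC16
 * FCS when enabled, send it and restart the retransmission timer. */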
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;
	int nsent = 0;

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {

		if (chan->remote_max_tx &&
				bt_cb(skb)->retries == chan->remote_max_tx) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
			break;
		}

		tx_skb = skb_clone(skb, GFP_ATOMIC);

		bt_cb(skb)->retries++;

		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control &= L2CAP_CTRL_SAR;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control |= L2CAP_CTRL_FINAL;

		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
		}

		l2cap_do_send(chan, tx_skb);

		__set_retrans_timer(chan);

		bt_cb(skb)->tx_seq = chan->next_tx_seq;
		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;

		if (bt_cb(skb)->retries == 1)
			chan->unacked_frames++;

		chan->frames_sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		nsent++;
	}

	return nsent;
}
static int l2cap_retransmit_frames(struct l2cap_chan *chan)
{
	int ret;

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = chan->tx_q.next;

	chan->next_tx_seq = chan->expected_ack_seq;
	ret = l2cap_ertm_send(chan);
	return ret;
}
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	u16 control = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);
		return;
	}

	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(chan, control);
}
static void l2cap_send_srejtail(struct l2cap_chan *chan)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(chan, control);
}
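
/* Copy user data from the iovec into the skb; anything that does not fit in
 * the first buffer is attached as continuation fragments without an L2CAP
 * header. */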
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len  -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len,
						u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
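
/* Entry point for outbound data: connectionless channels transmit a single
 * PDU immediately, basic mode enforces the outgoing MTU, and ERTM/streaming
 * either queue one unsegmented I-frame or segment the SDU via
 * l2cap_sar_segment_sdu() before sending. */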
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiple PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				test_bit(CONN_WAIT_F, &chan->conn_state)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		/* ... */
	}

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		/* ... */

		memcpy(skb_put(*frag, count), data, count);
		/* ... */

		frag = &(*frag)->next;
	}

	return skb;
}
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
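
/* Configuration options are encoded as { type, length, value } elements;
 * l2cap_get_conf_opt() and l2cap_add_conf_opt() above read and write 1-, 2-
 * and 4-byte values directly and fall back to a pointer/memcpy for larger
 * option payloads. */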
static void l2cap_ack_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;

	bh_lock_sock(chan->sk);
	l2cap_send_ack(chan);
	bh_unlock_sock(chan->sk);
}
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = chan->tx_win;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
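
/* Parse the peer's accumulated Configuration Request (chan->conf_req), accept
 * or adjust the MTU, RFC and FCS options, and build the Configuration
 * Response in 'data'; returns the response length, or -ECONNREFUSED when the
 * requested mode cannot be used. */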
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else
			chan->omtu = mtu;
		set_bit(CONF_MTU_DONE, &chan->conf_state);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			chan->remote_tx_win = rfc.txwin_size;
			chan->remote_max_tx = rfc.max_transmit;

			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			rfc.retrans_timeout =
				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_MODE_STREAMING:
			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;
		}
	}

	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		break;

	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
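
/* ---- Incoming signalling command handlers ---- */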
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
			cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}
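
/* Configuration Requests may arrive in fragments (continuation flag 0x0001
 * set); fragments are collected in chan->conf_req and only parsed once the
 * final fragment has been received. */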
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	struct sock *sk;
	int len = cmd->len - sizeof(*rsp);

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		break;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
							req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

	default:
		sk->sk_err = ECONNRESET;
		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
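
/*
 * Worked example for the check above (illustrative numbers only):
 * with min = 6, max = 16 (1.25 ms units) and to_multiplier = 100
 * (10 ms units), max >= to_multiplier * 8 is 16 >= 800 and fails,
 * and max_latency = (100 * 8 / 16) - 1 = 49, so any requested
 * latency up to 49 (and below the 500 cap) is accepted.
 */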
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
					min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
						sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u8 *data)
{
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		return 0;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		return l2cap_conn_param_update_req(conn, cmd, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		return 0;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		return -EINVAL;
	}
}
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
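
/*
 * Note on the FCS check above: the CRC-16 covers the basic L2CAP header
 * plus the control field (hdr_size = L2CAP_HDR_SIZE + 2) and the payload,
 * while the received FCS is the trailing two octets that skb_trim()
 * removes before the comparison.
 */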
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
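
/*
 * The SREJ queue above is kept ordered by distance from buffer_seq,
 * computed modulo 64 (the ERTM sequence space used throughout this
 * file); negative offsets are wrapped by adding 64 so out-of-order
 * frames still sort correctly across the wrap-around point.
 */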
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int err = -EINVAL;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SDU_START:
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SDU_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, control);

	set_bit(CONN_RNR_SENT, &chan->conn_state);

	__clear_ack_timer(chan);
}
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	if (chan->mode == L2CAP_MODE_ERTM) {
		if (busy)
			l2cap_ertm_enter_local_busy(chan);
		else
			l2cap_ertm_exit_local_busy(chan);
	}
}
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *new;
	u16 control;

	while (tx_seq != chan->expected_tx_seq) {
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);

		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		new->tx_seq = chan->expected_tx_seq;
		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		list_add_tail(&new->list, &chan->srej_l);
	}

	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
}
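
/*
 * l2cap_send_srejframe() above emits one SREJ S-frame per missing
 * tx_seq between the expected and the received sequence number and
 * records each request in chan->srej_l, so l2cap_resend_srejframe()
 * and l2cap_check_srej_gap() can match retransmissions against it.
 */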
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		goto drop;

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		set_bit(CONN_SEND_PBIT, &chan->conn_state);

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
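
/*
 * I-frame acceptance above: a frame whose offset from buffer_seq falls
 * outside the tx_win receive window triggers a disconnect request and
 * is dropped; in-sequence frames are reassembled immediately, while a
 * gap switches the channel into the SREJ_SENT recovery state.
 */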
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (rx_control & L2CAP_CTRL_POLL) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		__clear_retrans_timer(chan);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		if (len != 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
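
/*
 * The length checks above assume the 2-byte control field has already
 * been pulled: 2 more bytes are subtracted for the SDU length of a SAR
 * start fragment and another 2 when a CRC-16 FCS is in use, so "len"
 * ends up being the payload size that is compared against chan->mps.
 */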
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = (tx_seq + 1) % 64;

		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
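
/*
 * Per-mode dispatch above: basic mode hands frames straight to the
 * socket (subject to the imtu check), ERTM frames are processed in
 * place or pushed to the socket backlog when the socket is owned by
 * user context, and streaming mode resynchronizes on a missing tx_seq
 * by discarding the partial SDU.
 */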
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
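
/*
 * Fixed-CID demultiplexing above: signalling (BR/EDR and LE),
 * connectionless, ATT (LE data) and SMP traffic go to their dedicated
 * handlers; everything else is treated as a dynamically allocated
 * data channel CID.
 */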
/* ---- L2CAP interface with lower layer (HCI) ---- */

static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
		return -EINVAL;

	if (!status) {
		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else
		l2cap_conn_del(hcon, bt_to_errno(status));

	return 0;
}
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
		return 0x13;

	return conn->disc_reason;
}

static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
		return -EINVAL;

	l2cap_conn_del(hcon, bt_to_errno(reason));

	return 0;
}
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__clear_chan_timer(chan);
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		smp_distribute_keys(conn, 0);
		del_timer(&conn->security_timer);
	}

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(sk);
			}

			bh_unlock_sock(sk);
			continue;
		}

		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm  = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				set_bit(CONF_CONNECT_PEND, &chan->conf_state);

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				__clear_chan_timer(chan);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock_bh(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					c->state, __le16_to_cpu(c->psm),
					c->scid, c->dcid, c->imtu, c->omtu,
					c->sec_level, c->mode);
	}

	read_unlock_bh(&chan_list_lock);

	return 0;
}

static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;
static struct hci_proto l2cap_hci_proto = {
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;

error:
	l2cap_cleanup_sockets();
	return err;
}

void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");