/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>

#include <asm/system.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
				void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
/* ---- L2CAP channels ---- */

static inline void chan_hold(struct l2cap_chan *c)
	atomic_inc(&c->refcnt);

static inline void chan_put(struct l2cap_chan *c)
	if (atomic_dec_and_test(&c->refcnt))

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
	list_for_each_entry(c, &conn->chan_l, list) {

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
	struct l2cap_chan *c;
	list_for_each_entry(c, &conn->chan_l, list) {

/* Find channel with given SCID.
 * Returns locked socket */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
	struct l2cap_chan *c;
	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	read_unlock(&conn->chan_lock);

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
	struct l2cap_chan *c;
	list_for_each_entry(c, &conn->chan_l, list) {
	if (c->ident == ident)

static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
	struct l2cap_chan *c;
	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	read_unlock(&conn->chan_lock);

static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
	struct l2cap_chan *c;
	list_for_each_entry(c, &chan_list, global_l) {
	if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
	write_lock_bh(&chan_list_lock);
	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
	for (p = 0x1001; p < 0x1100; p += 2)
	if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
	chan->psm = cpu_to_le16(p);
	chan->sport = cpu_to_le16(p);
	write_unlock_bh(&chan_list_lock);

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
	write_lock_bh(&chan_list_lock);
	write_unlock_bh(&chan_list_lock);

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
	u16 cid = L2CAP_CID_DYN_START;
	for (; cid < L2CAP_CID_DYN_END; cid++) {
	if (!__l2cap_get_chan_by_scid(conn, cid))
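/*
 * Illustrative sketch, not part of the original file: dynamic CIDs are handed
 * out by a linear scan over the dynamic range (0x0040 and up), skipping any
 * value already bound to a channel on this connection.  The helper below is a
 * hypothetical analogue of l2cap_alloc_cid() that takes an "in use" predicate
 * instead of walking conn->chan_l.
 */
static u16 example_alloc_dyn_cid(int (*cid_in_use)(u16 cid))
{
	u16 cid;

	for (cid = 0x0040; cid < 0xffff; cid++) {
		if (!cid_in_use(cid))
			return cid;
	}

	return 0;	/* no free CID in the dynamic range */
}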
static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))

static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
	BT_DBG("chan %p state %d", chan, chan->state);
	if (timer_pending(timer) && del_timer(timer))

static void l2cap_state_change(struct l2cap_chan *chan, int state)
	chan->ops->state_change(chan->data, state);

static void l2cap_chan_timeout(unsigned long arg)
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	BT_DBG("chan %p state %d", chan, chan->state);
	if (sock_owned_by_user(sk)) {
	/* sk is owned by user. Try again later */
	__set_chan_timer(chan, HZ / 5);
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
	reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
			chan->sec_level != BT_SECURITY_SDP)
	reason = ECONNREFUSED;
	l2cap_chan_close(chan, reason);
	chan->ops->close(chan->data);

struct l2cap_chan *l2cap_chan_create(struct sock *sk)
	struct l2cap_chan *chan;
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	write_lock_bh(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock_bh(&chan_list_lock);
	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
	chan->state = BT_OPEN;
	atomic_set(&chan->refcnt, 1);

void l2cap_chan_destroy(struct l2cap_chan *chan)
	write_lock_bh(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock_bh(&chan_list_lock);

static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, chan->psm, chan->dcid);
	conn->disc_reason = 0x13;
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
	if (conn->hcon->type == LE_LINK) {
	chan->omtu = L2CAP_LE_DEFAULT_MTU;
	chan->scid = L2CAP_CID_LE_DATA;
	chan->dcid = L2CAP_CID_LE_DATA;
	/* Alloc CID for connection-oriented socket */
	chan->scid = l2cap_alloc_cid(conn);
	chan->omtu = L2CAP_DEFAULT_MTU;
	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
	/* Connectionless socket */
	chan->scid = L2CAP_CID_CONN_LESS;
	chan->dcid = L2CAP_CID_CONN_LESS;
	chan->omtu = L2CAP_DEFAULT_MTU;
	/* Raw socket can send/recv signalling messages only */
	chan->scid = L2CAP_CID_SIGNALING;
	chan->dcid = L2CAP_CID_SIGNALING;
	chan->omtu = L2CAP_DEFAULT_MTU;
	list_add(&chan->list, &conn->chan_l);
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;
	__clear_chan_timer(chan);
	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
	/* Delete from channel list */
	write_lock_bh(&conn->chan_lock);
	list_del(&chan->list);
	write_unlock_bh(&conn->chan_lock);
	hci_conn_put(conn->hcon);
	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);
	bt_accept_unlink(sk);
	parent->sk_data_ready(parent, 0);
	sk->sk_state_change(sk);
	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
	skb_queue_purge(&chan->tx_q);
	if (chan->mode == L2CAP_MODE_ERTM) {
	struct srej_list *l, *tmp;
	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);
	skb_queue_purge(&chan->srej_q);
	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {

static void l2cap_chan_cleanup_listen(struct sock *parent)
	BT_DBG("parent %p", parent);
	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	__clear_chan_timer(chan);
	l2cap_chan_close(chan, ECONNRESET);
	chan->ops->close(chan->data);

void l2cap_chan_close(struct l2cap_chan *chan, int reason)
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;
	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
	switch (chan->state) {
	l2cap_chan_cleanup_listen(sk);
	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
			conn->hcon->type == ACL_LINK) {
	__clear_chan_timer(chan);
	__set_chan_timer(chan, sk->sk_sndtimeo);
	l2cap_send_disconn_req(conn, chan, reason);
	l2cap_chan_del(chan, reason);
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
			conn->hcon->type == ACL_LINK) {
	struct l2cap_conn_rsp rsp;
	if (bt_sk(sk)->defer_setup)
	result = L2CAP_CR_SEC_BLOCK;
	result = L2CAP_CR_BAD_PSM;
	l2cap_state_change(chan, BT_DISCONN);
	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
	l2cap_chan_del(chan, reason);
	l2cap_chan_del(chan, reason);
	sock_set_flag(sk, SOCK_ZAPPED);
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
	if (chan->chan_type == L2CAP_CHAN_RAW) {
	switch (chan->sec_level) {
	case BT_SECURITY_HIGH:
	return HCI_AT_DEDICATED_BONDING_MITM;
	case BT_SECURITY_MEDIUM:
	return HCI_AT_DEDICATED_BONDING;
	return HCI_AT_NO_BONDING;
	} else if (chan->psm == cpu_to_le16(0x0001)) {
	if (chan->sec_level == BT_SECURITY_LOW)
	chan->sec_level = BT_SECURITY_SDP;
	if (chan->sec_level == BT_SECURITY_HIGH)
	return HCI_AT_NO_BONDING_MITM;
	return HCI_AT_NO_BONDING;
	switch (chan->sec_level) {
	case BT_SECURITY_HIGH:
	return HCI_AT_GENERAL_BONDING_MITM;
	case BT_SECURITY_MEDIUM:
	return HCI_AT_GENERAL_BONDING;
	return HCI_AT_NO_BONDING;

/* Service level security */
static inline int l2cap_check_security(struct l2cap_chan *chan)
	struct l2cap_conn *conn = chan->conn;
	auth_type = l2cap_get_auth_type(chan);
	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);

static u8 l2cap_get_ident(struct l2cap_conn *conn)
	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */
	spin_lock_bh(&conn->lock);
	if (++conn->tx_ident > 128)
	spin_unlock_bh(&conn->lock);
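/*
 * Illustrative sketch, not part of the original file: the signalling
 * identifier cycles through the kernel-owned range 1..128 and wraps back to
 * 1, which lets responses be matched to requests while the values above 128
 * stay reserved for other users such as l2ping.
 */
static u8 example_next_ident(u8 last_ident)
{
	if (++last_ident > 128)
		last_ident = 1;

	return last_ident;
}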
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	BT_DBG("code 0x%2.2x", code);
	if (lmp_no_flush_capable(conn->hcon->hdev))
	flags = ACL_START_NO_FLUSH;
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	hci_send_acl(conn->hcon, skb, flags);

static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;
	if (chan->state != BT_CONNECTED)
	if (chan->fcs == L2CAP_FCS_CRC16)
	BT_DBG("chan %p, control 0x%2.2x", chan, control);
	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
	control |= L2CAP_CTRL_FINAL;
	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
	control |= L2CAP_CTRL_POLL;
	skb = bt_skb_alloc(count, GFP_ATOMIC);
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));
	if (chan->fcs == L2CAP_FCS_CRC16) {
	u16 fcs = crc16(0, (u8 *)lh, count - 2);
	put_unaligned_le16(fcs, skb_put(skb, 2));
	if (lmp_no_flush_capable(conn->hcon->hdev))
	flags = ACL_START_NO_FLUSH;
	bt_cb(skb)->force_active = chan->force_active;
	hci_send_acl(chan->conn->hcon, skb, flags);

static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
	control |= L2CAP_SUPER_RCV_NOT_READY;
	set_bit(CONN_RNR_SENT, &chan->conn_state);
	control |= L2CAP_SUPER_RCV_READY;
	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	l2cap_send_sframe(chan, control);
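/*
 * Illustrative sketch, not part of the original file: an ERTM supervisory
 * (S-frame) control field is the frame-type bit OR-ed with the supervisory
 * function (RR, RNR, REJ or SREJ), the ReqSeq acknowledgement number shifted
 * into its field, and optionally the Poll or Final bit, mirroring how
 * l2cap_send_sframe() and l2cap_send_rr_or_rnr() above assemble "control".
 */
static u16 example_build_sframe_control(u16 super, u8 req_seq, int poll, int final)
{
	u16 control = L2CAP_CTRL_FRAME_TYPE;

	control |= super;
	control |= req_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (poll)
		control |= L2CAP_CTRL_POLL;
	if (final)
		control |= L2CAP_CTRL_FINAL;

	return control;
}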
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);

static void l2cap_do_start(struct l2cap_chan *chan)
	struct l2cap_conn *conn = chan->conn;
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
	if (l2cap_check_security(chan) &&
			__l2cap_no_conn_pending(chan)) {
	struct l2cap_conn_req req;
	req.scid = cpu_to_le16(chan->scid);
	chan->ident = l2cap_get_ident(conn);
	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
	struct l2cap_info_req req;
	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);
	mod_timer(&conn->info_timer, jiffies +
			msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
	l2cap_send_cmd(conn, conn->info_ident,
			L2CAP_INFO_REQ, sizeof(req), &req);
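/*
 * Note (added commentary, not in the original file): l2cap_do_start() holds
 * back the actual L2CAP_CONN_REQ until the information exchange has finished.
 * If no feature-mask request has been sent yet, it first issues an
 * L2CAP_INFO_REQ guarded by conn->info_timer; once the mask exchange is done
 * and the channel passes the security check, the connection request goes out.
 */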
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
	u32 local_feat_mask = l2cap_feat_mask;
	local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
	case L2CAP_MODE_ERTM:
	return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
	return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;

static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
	struct l2cap_disconn_req req;
	if (chan->mode == L2CAP_MODE_ERTM) {
	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);
	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);
	l2cap_state_change(chan, BT_DISCONN);
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
	struct l2cap_chan *chan, *tmp;
	BT_DBG("conn %p", conn);
	read_lock(&conn->chan_lock);
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
	struct sock *sk = chan->sk;
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
	if (chan->state == BT_CONNECT) {
	struct l2cap_conn_req req;
	if (!l2cap_check_security(chan) ||
			!__l2cap_no_conn_pending(chan)) {
	if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			&& test_bit(CONF_STATE2_DEVICE,
			&chan->conf_state)) {
	/* l2cap_chan_close() calls list_del(chan)
	 * so release the lock */
	read_unlock(&conn->chan_lock);
	l2cap_chan_close(chan, ECONNRESET);
	read_lock(&conn->chan_lock);
	req.scid = cpu_to_le16(chan->scid);
	chan->ident = l2cap_get_ident(conn);
	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
	} else if (chan->state == BT_CONNECT2) {
	struct l2cap_conn_rsp rsp;
	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	if (l2cap_check_security(chan)) {
	if (bt_sk(sk)->defer_setup) {
	struct sock *parent = bt_sk(sk)->parent;
	rsp.result = cpu_to_le16(L2CAP_CR_PEND);
	rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
	parent->sk_data_ready(parent, 0);
	l2cap_state_change(chan, BT_CONFIG);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	rsp.result = cpu_to_le16(L2CAP_CR_PEND);
	rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
	if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			rsp.result != L2CAP_CR_SUCCESS) {
	set_bit(CONF_REQ_SENT, &chan->conf_state);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
	read_unlock(&conn->chan_lock);
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked. */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
	struct l2cap_chan *c, *c1 = NULL;
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
	struct sock *sk = c->sk;
	if (state && c->state != state)
	if (c->scid == cid) {
	if (!bacmp(&bt_sk(sk)->src, src)) {
	read_unlock(&chan_list_lock);
	if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
	read_unlock(&chan_list_lock);

static void l2cap_le_conn_ready(struct l2cap_conn *conn)
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;
	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
	bh_lock_sock(parent);
	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
	BT_DBG("backlog full %d", parent->sk_ack_backlog);
	chan = pchan->ops->new_connection(pchan->data);
	write_lock_bh(&conn->chan_lock);
	hci_conn_hold(conn->hcon);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	bt_accept_enqueue(parent, sk);
	__l2cap_chan_add(conn, chan);
	__set_chan_timer(chan, sk->sk_sndtimeo);
	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);
	write_unlock_bh(&conn->chan_lock);
	bh_unlock_sock(parent);

static void l2cap_chan_ready(struct sock *sk)
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sock *parent = bt_sk(sk)->parent;
	BT_DBG("sk %p, parent %p", sk, parent);
	chan->conf_state = 0;
	__clear_chan_timer(chan);
	l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);
	parent->sk_data_ready(parent, 0);

static void l2cap_conn_ready(struct l2cap_conn *conn)
	struct l2cap_chan *chan;
	BT_DBG("conn %p", conn);
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
	l2cap_le_conn_ready(conn);
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
	smp_conn_security(conn, conn->hcon->pending_sec_level);
	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
	struct sock *sk = chan->sk;
	if (conn->hcon->type == LE_LINK) {
	if (smp_conn_security(conn, chan->sec_level))
	l2cap_chan_ready(sk);
	} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
	__clear_chan_timer(chan);
	l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);
	} else if (chan->state == BT_CONNECT)
	l2cap_do_start(chan);
	read_unlock(&conn->chan_lock);
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
	struct l2cap_chan *chan;
	BT_DBG("conn %p", conn);
	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
	struct sock *sk = chan->sk;
	if (chan->force_reliable)
	read_unlock(&conn->chan_lock);

static void l2cap_info_timeout(unsigned long arg)
	struct l2cap_conn *conn = (void *) arg;
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;
	l2cap_conn_start(conn);

static void l2cap_conn_del(struct hci_conn *hcon, int err)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
	kfree_skb(conn->rx_skb);
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
	l2cap_chan_del(chan, err);
	chan->ops->close(chan->data);
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
	del_timer_sync(&conn->info_timer);
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
	del_timer(&conn->security_timer);
	hcon->l2cap_data = NULL;

static void security_timeout(unsigned long arg)
	struct l2cap_conn *conn = (void *) arg;
	l2cap_conn_del(conn->hcon, ETIMEDOUT);

static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn = hcon->l2cap_data;
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	hcon->l2cap_data = conn;
	BT_DBG("hcon %p conn %p", hcon, conn);
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
	conn->mtu = hcon->hdev->le_mtu;
	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;
	conn->feat_mask = 0;
	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);
	INIT_LIST_HEAD(&conn->chan_l);
	if (hcon->type == LE_LINK)
	setup_timer(&conn->security_timer, security_timeout,
			(unsigned long) conn);
	setup_timer(&conn->info_timer, l2cap_info_timeout,
			(unsigned long) conn);
	conn->disc_reason = 0x13;

static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
/* ---- Socket interface ---- */

/* Find socket with psm and source bdaddr.
 * Returns closest match. */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
	struct l2cap_chan *c, *c1 = NULL;
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
	struct sock *sk = c->sk;
	if (state && c->state != state)
	if (c->psm == psm) {
	if (!bacmp(&bt_sk(sk)->src, src)) {
	read_unlock(&chan_list_lock);
	if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
	read_unlock(&chan_list_lock);

int l2cap_chan_connect(struct l2cap_chan *chan)
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
	hdev = hci_get_route(dst, src);
	return -EHOSTUNREACH;
	hci_dev_lock_bh(hdev);
	auth_type = l2cap_get_auth_type(chan);
	if (chan->dcid == L2CAP_CID_LE_DATA)
	hcon = hci_connect(hdev, LE_LINK, dst,
			chan->sec_level, auth_type);
	hcon = hci_connect(hdev, ACL_LINK, dst,
			chan->sec_level, auth_type);
	err = PTR_ERR(hcon);
	conn = l2cap_conn_add(hcon, 0);
	/* Update source addr of the socket */
	bacpy(src, conn->src);
	l2cap_chan_add(conn, chan);
	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);
	if (hcon->state == BT_CONNECTED) {
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
	__clear_chan_timer(chan);
	if (l2cap_check_security(chan))
	l2cap_state_change(chan, BT_CONNECTED);
	l2cap_do_start(chan);
	hci_dev_unlock_bh(hdev);
int __l2cap_wait_ack(struct sock *sk)
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
	if (signal_pending(current)) {
	err = sock_intr_errno(timeo);
	timeo = schedule_timeout(timeo);
	set_current_state(TASK_INTERRUPTIBLE);
	err = sock_error(sk);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

static void l2cap_monitor_timeout(unsigned long arg)
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;
	BT_DBG("chan %p", chan);
	if (chan->retry_count >= chan->remote_max_tx) {
	l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
	chan->retry_count++;
	__set_monitor_timer(chan);
	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

static void l2cap_retrans_timeout(unsigned long arg)
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;
	BT_DBG("chan %p", chan);
	chan->retry_count = 1;
	__set_monitor_timer(chan);
	set_bit(CONN_WAIT_F, &chan->conn_state);
	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
	struct sk_buff *skb;
	while ((skb = skb_peek(&chan->tx_q)) &&
			chan->unacked_frames) {
	if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
	skb = skb_dequeue(&chan->tx_q);
	chan->unacked_frames--;
	if (!chan->unacked_frames)
	__clear_retrans_timer(chan);
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
	struct hci_conn *hcon = chan->conn->hcon;
	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
	flags = ACL_START_NO_FLUSH;
	bt_cb(skb)->force_active = chan->force_active;
	hci_send_acl(hcon, skb, flags);

static void l2cap_streaming_send(struct l2cap_chan *chan)
	struct sk_buff *skb;
	while ((skb = skb_dequeue(&chan->tx_q))) {
	control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
	control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
	put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
	if (chan->fcs == L2CAP_FCS_CRC16) {
	fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
	put_unaligned_le16(fcs, skb->data + skb->len - 2);
	l2cap_do_send(chan, skb);
	chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
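/*
 * Illustrative sketch, not part of the original file: when FCS is in use the
 * CRC-16 covers the whole PDU (basic header, control field and payload)
 * except the trailing two-byte FCS field itself, which is why the code above
 * runs crc16() over skb->len - 2 bytes and stores the result in the last two
 * bytes of the frame.
 */
static void example_append_fcs(u8 *pdu, size_t pdu_len_with_fcs)
{
	u16 fcs = crc16(0, pdu, pdu_len_with_fcs - 2);

	put_unaligned_le16(fcs, pdu + pdu_len_with_fcs - 2);
}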
static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
	struct sk_buff *skb, *tx_skb;
	skb = skb_peek(&chan->tx_q);
	if (bt_cb(skb)->tx_seq == tx_seq)
	if (skb_queue_is_last(&chan->tx_q, skb))
	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
	l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control &= L2CAP_CTRL_SAR;
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
	control |= L2CAP_CTRL_FINAL;
	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
	if (chan->fcs == L2CAP_FCS_CRC16) {
	fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
	put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	l2cap_do_send(chan, tx_skb);

static int l2cap_ertm_send(struct l2cap_chan *chan)
	struct sk_buff *skb, *tx_skb;
	if (chan->state != BT_CONNECTED)
	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
	l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control &= L2CAP_CTRL_SAR;
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
	control |= L2CAP_CTRL_FINAL;
	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
	if (chan->fcs == L2CAP_FCS_CRC16) {
	fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
	put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
	l2cap_do_send(chan, tx_skb);
	__set_retrans_timer(chan);
	bt_cb(skb)->tx_seq = chan->next_tx_seq;
	chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	if (bt_cb(skb)->retries == 1)
	chan->unacked_frames++;
	chan->frames_sent++;
	if (skb_queue_is_last(&chan->tx_q, skb))
	chan->tx_send_head = NULL;
	chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
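/*
 * Illustrative sketch, not part of the original file and only an assumption
 * about l2cap_tx_window_full(): ERTM keeps at most the remote side's
 * advertised window of I-frames outstanding.  A frame counts as outstanding
 * from its first transmission until a ReqSeq from the peer acknowledges it,
 * with distances measured in the 6-bit, 64-value sequence space.
 */
static int example_tx_window_full(u8 next_tx_seq, u8 expected_ack_seq,
							u8 remote_tx_win)
{
	u8 outstanding = (next_tx_seq - expected_ack_seq) % 64;

	return outstanding >= remote_tx_win;
}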
static int l2cap_retransmit_frames(struct l2cap_chan *chan)
	if (!skb_queue_empty(&chan->tx_q))
	chan->tx_send_head = chan->tx_q.next;
	chan->next_tx_seq = chan->expected_ack_seq;
	ret = l2cap_ertm_send(chan);

static void l2cap_send_ack(struct l2cap_chan *chan)
	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
	control |= L2CAP_SUPER_RCV_NOT_READY;
	set_bit(CONN_RNR_SENT, &chan->conn_state);
	l2cap_send_sframe(chan, control);
	if (l2cap_ertm_send(chan) > 0)
	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(chan, control);

static void l2cap_send_srejtail(struct l2cap_chan *chan)
	struct srej_list *tail;
	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;
	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	l2cap_send_sframe(chan, control);

static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
						int len, int count, struct sk_buff *skb)
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	count = min_t(unsigned int, conn->mtu, len);
	*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
	if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
	frag = &(*frag)->next;
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len)
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;
	BT_DBG("sk %p len %d", sk, (int)len);
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	return ERR_PTR(err);
	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));
	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
	return ERR_PTR(err);

static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len)
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;
	BT_DBG("sk %p len %d", sk, (int)len);
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	return ERR_PTR(err);
	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
	return ERR_PTR(err);

static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len,
						u16 control, u16 sdulen)
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;
	BT_DBG("sk %p len %d", sk, (int)len);
	return ERR_PTR(-ENOTCONN);
	if (chan->fcs == L2CAP_FCS_CRC16)
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	return ERR_PTR(err);
	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	put_unaligned_le16(sdulen, skb_put(skb, 2));
	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
	return ERR_PTR(err);
	if (chan->fcs == L2CAP_FCS_CRC16)
	put_unaligned_le16(0, skb_put(skb, 2));
	bt_cb(skb)->retries = 0;
static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	return PTR_ERR(skb);
	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;
	if (len > chan->remote_mps) {
	control = L2CAP_SDU_CONTINUE;
	buflen = chan->remote_mps;
	control = L2CAP_SDU_END;
	skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
	skb_queue_purge(&sar_queue);
	return PTR_ERR(skb);
	__skb_queue_tail(&sar_queue, skb);
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
	chan->tx_send_head = sar_queue.next;
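/*
 * Illustrative sketch, not part of the original file: an SDU larger than the
 * remote MPS is split into a "start" PDU (which also carries the total SDU
 * length), zero or more "continue" PDUs and a final "end" PDU, matching the
 * L2CAP_SDU_START/CONTINUE/END handling above.  Every PDU carries at most
 * remote_mps bytes of the SDU, so the PDU count is a ceiling division.
 */
static size_t example_count_sar_pdus(size_t sdu_len, size_t remote_mps)
{
	if (sdu_len <= remote_mps)
		return 1;	/* sent unsegmented */

	return (sdu_len + remote_mps - 1) / remote_mps;
}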
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
	struct sk_buff *skb;
	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
	skb = l2cap_create_connless_pdu(chan, msg, len);
	return PTR_ERR(skb);
	l2cap_do_send(chan, skb);
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	/* Check outgoing MTU */
	if (len > chan->omtu)
	/* Create a basic PDU */
	skb = l2cap_create_basic_pdu(chan, msg, len);
	return PTR_ERR(skb);
	l2cap_do_send(chan, skb);
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
	/* Entire SDU fits into one PDU */
	if (len <= chan->remote_mps) {
	control = L2CAP_SDU_UNSEGMENTED;
	skb = l2cap_create_iframe_pdu(chan, msg, len, control,
	return PTR_ERR(skb);
	__skb_queue_tail(&chan->tx_q, skb);
	if (chan->tx_send_head == NULL)
	chan->tx_send_head = skb;
	/* Segment SDU into multiple PDUs */
	err = l2cap_sar_segment_sdu(chan, msg, len);
	if (chan->mode == L2CAP_MODE_STREAMING) {
	l2cap_streaming_send(chan);
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
	err = l2cap_ertm_send(chan);
	BT_DBG("bad state %1.1x", chan->mode);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct sk_buff *nskb;
	struct l2cap_chan *chan;
	BT_DBG("conn %p", conn);
	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
	struct sock *sk = chan->sk;
	if (chan->chan_type != L2CAP_CHAN_RAW)
	/* Don't send frame to the socket it came from */
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (chan->ops->recv(chan->data, nskb))
	read_unlock(&conn->chan_lock);

/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);
	skb = bt_skb_alloc(count, GFP_ATOMIC);
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	if (conn->hcon->type == LE_LINK)
	lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);
	count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
	memcpy(skb_put(skb, count), data, count);
	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	count = min_t(unsigned int, conn->mtu, len);
	*frag = bt_skb_alloc(count, GFP_ATOMIC);
	memcpy(skb_put(*frag, count), data, count);
	frag = &(*frag)->next;
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
	struct l2cap_conf_opt *opt = *ptr;
	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*val = *((u8 *) opt->val);
	*val = get_unaligned_le16(opt->val);
	*val = get_unaligned_le32(opt->val);
	*val = (unsigned long) opt->val;
	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
	struct l2cap_conf_opt *opt = *ptr;
	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
	*((u8 *) opt->val) = val;
	put_unaligned_le16(val, opt->val);
	put_unaligned_le32(val, opt->val);
	memcpy(opt->val, (void *) val, len);
	*ptr += L2CAP_CONF_OPT_SIZE + len;
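/*
 * Illustrative sketch, not part of the original file: configuration options
 * are packed back to back as type/length/value records, so a parser walks the
 * buffer one option at a time, the way l2cap_parse_conf_req() below drives
 * l2cap_get_conf_opt().
 */
static void example_walk_conf_opts(const u8 *buf, int len)
{
	while (len >= L2CAP_CONF_OPT_SIZE) {
		u8 type = buf[0];
		u8 olen = buf[1];

		if (olen > len - L2CAP_CONF_OPT_SIZE)
			break;	/* malformed option, stop parsing */

		BT_DBG("conf opt type 0x%2.2x len %d", type, olen);

		buf += L2CAP_CONF_OPT_SIZE + olen;
		len -= L2CAP_CONF_OPT_SIZE + olen;
	}
}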
static void l2cap_ack_timeout(unsigned long arg)
	struct l2cap_chan *chan = (void *) arg;
	bh_lock_sock(chan->sk);
	l2cap_send_ack(chan);
	bh_unlock_sock(chan->sk);

static inline void l2cap_ertm_init(struct l2cap_chan *chan)
	struct sock *sk = chan->sk;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;
	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
			(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
			(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
	skb_queue_head_init(&chan->srej_q);
	INIT_LIST_HEAD(&chan->srej_l);
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;

static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
	if (l2cap_mode_supported(mode, remote_feat_mask))
	return L2CAP_MODE_BASIC;
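/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * reconstruction of the mode-selection fallback shown fragmentarily above.
 * ERTM or streaming is kept only when the remote feature mask advertises it;
 * anything else falls back to basic mode.
 */
static __u8 example_select_mode(__u8 wanted, __u16 remote_feat_mask)
{
	switch (wanted) {
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (l2cap_mode_supported(wanted, remote_feat_mask))
			return wanted;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}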
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	BT_DBG("chan %p", chan);
	if (chan->num_conf_req || chan->num_conf_rsp)
	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
	if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
	chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
	if (chan->imtu != L2CAP_DEFAULT_MTU)
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
			!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
	rfc.mode = L2CAP_MODE_BASIC;
	rfc.max_transmit = 0;
	rfc.retrans_timeout = 0;
	rfc.monitor_timeout = 0;
	rfc.max_pdu_size = 0;
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
			(unsigned long) &rfc);
	case L2CAP_MODE_ERTM:
	rfc.mode = L2CAP_MODE_ERTM;
	rfc.txwin_size = chan->tx_win;
	rfc.max_transmit = chan->max_tx;
	rfc.retrans_timeout = 0;
	rfc.monitor_timeout = 0;
	rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
	if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
	rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
			(unsigned long) &rfc);
	if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
	if (chan->fcs == L2CAP_FCS_NONE ||
			test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
	chan->fcs = L2CAP_FCS_NONE;
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
	case L2CAP_MODE_STREAMING:
	rfc.mode = L2CAP_MODE_STREAMING;
	rfc.max_transmit = 0;
	rfc.retrans_timeout = 0;
	rfc.monitor_timeout = 0;
	rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
	if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
	rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
			(unsigned long) &rfc);
	if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
	if (chan->fcs == L2CAP_FCS_NONE ||
			test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
	chan->fcs = L2CAP_FCS_NONE;
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	BT_DBG("chan %p", chan);
	while (len >= L2CAP_CONF_OPT_SIZE) {
	len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
	hint = type & L2CAP_CONF_HINT;
	type &= L2CAP_CONF_MASK;
	case L2CAP_CONF_MTU:
	case L2CAP_CONF_FLUSH_TO:
	chan->flush_to = val;
	case L2CAP_CONF_QOS:
	case L2CAP_CONF_RFC:
	if (olen == sizeof(rfc))
	memcpy(&rfc, (void *) val, olen);
	case L2CAP_CONF_FCS:
	if (val == L2CAP_FCS_NONE)
	set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
	result = L2CAP_CONF_UNKNOWN;
	*((u8 *) ptr++) = type;
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
	if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
	chan->mode = l2cap_select_mode(rfc.mode,
			chan->conn->feat_mask);
	if (chan->mode != rfc.mode)
	return -ECONNREFUSED;
	if (chan->mode != rfc.mode) {
	result = L2CAP_CONF_UNACCEPT;
	rfc.mode = chan->mode;
	if (chan->num_conf_rsp == 1)
	return -ECONNREFUSED;
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
			sizeof(rfc), (unsigned long) &rfc);
	if (result == L2CAP_CONF_SUCCESS) {
	/* Configure output options and let the other side know
	 * which ones we don't like. */
	if (mtu < L2CAP_DEFAULT_MIN_MTU)
	result = L2CAP_CONF_UNACCEPT;
	set_bit(CONF_MTU_DONE, &chan->conf_state);
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
	case L2CAP_MODE_BASIC:
	chan->fcs = L2CAP_FCS_NONE;
	set_bit(CONF_MODE_DONE, &chan->conf_state);
	case L2CAP_MODE_ERTM:
	chan->remote_tx_win = rfc.txwin_size;
	chan->remote_max_tx = rfc.max_transmit;
	if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
	rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
	chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
	rfc.retrans_timeout = le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
	set_bit(CONF_MODE_DONE, &chan->conf_state);
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
			sizeof(rfc), (unsigned long) &rfc);
	case L2CAP_MODE_STREAMING:
	if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
	rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
	chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
	set_bit(CONF_MODE_DONE, &chan->conf_state);
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
			sizeof(rfc), (unsigned long) &rfc);
	result = L2CAP_CONF_UNACCEPT;
	memset(&rfc, 0, sizeof(rfc));
	rfc.mode = chan->mode;
	if (result == L2CAP_CONF_SUCCESS)
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
						void *data, u16 *result)
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	struct l2cap_conf_rfc rfc;
	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
	while (len >= L2CAP_CONF_OPT_SIZE) {
	len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
	case L2CAP_CONF_MTU:
	if (val < L2CAP_DEFAULT_MIN_MTU) {
	*result = L2CAP_CONF_UNACCEPT;
	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
	case L2CAP_CONF_FLUSH_TO:
	chan->flush_to = val;
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
	case L2CAP_CONF_RFC:
	if (olen == sizeof(rfc))
	memcpy(&rfc, (void *)val, olen);
	if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			rfc.mode != chan->mode)
	return -ECONNREFUSED;
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
			sizeof(rfc), (unsigned long) &rfc);
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
	return -ECONNREFUSED;
	chan->mode = rfc.mode;
	if (*result == L2CAP_CONF_SUCCESS) {
	case L2CAP_MODE_ERTM:
	chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
	chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
	chan->mps = le16_to_cpu(rfc.max_pdu_size);
	case L2CAP_MODE_STREAMING:
	chan->mps = le16_to_cpu(rfc.max_pdu_size);
	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0x0000);

static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	BT_DBG("chan %p", chan);
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
			L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;

static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
	struct l2cap_conf_rfc rfc;
	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
	while (len >= L2CAP_CONF_OPT_SIZE) {
	len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
	case L2CAP_CONF_RFC:
	if (olen == sizeof(rfc))
	memcpy(&rfc, (void *)val, olen);
	case L2CAP_MODE_ERTM:
	chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
	chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
	chan->mps = le16_to_cpu(rfc.max_pdu_size);
	case L2CAP_MODE_STREAMING:
	chan->mps = le16_to_cpu(rfc.max_pdu_size);
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
			cmd->ident == conn->info_ident) {
	del_timer(&conn->info_timer);
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;
	l2cap_conn_start(conn);

static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;
	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;
	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	result = L2CAP_CR_BAD_PSM;
	bh_lock_sock(parent);
	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
	conn->disc_reason = 0x05;
	result = L2CAP_CR_SEC_BLOCK;
	result = L2CAP_CR_NO_MEM;
	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
	BT_DBG("backlog full %d", parent->sk_ack_backlog);
	chan = pchan->ops->new_connection(pchan->data);
	write_lock_bh(&conn->chan_lock);
	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
	write_unlock_bh(&conn->chan_lock);
	sock_set_flag(sk, SOCK_ZAPPED);
	chan->ops->close(chan->data);
	hci_conn_hold(conn->hcon);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	bt_accept_enqueue(parent, sk);
	__l2cap_chan_add(conn, chan);
	__set_chan_timer(chan, sk->sk_sndtimeo);
	chan->ident = cmd->ident;
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
	if (l2cap_check_security(chan)) {
	if (bt_sk(sk)->defer_setup) {
	l2cap_state_change(chan, BT_CONNECT2);
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHOR_PEND;
	parent->sk_data_ready(parent, 0);
	l2cap_state_change(chan, BT_CONFIG);
	result = L2CAP_CR_SUCCESS;
	status = L2CAP_CS_NO_INFO;
	l2cap_state_change(chan, BT_CONNECT2);
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHEN_PEND;
	l2cap_state_change(chan, BT_CONNECT2);
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_NO_INFO;
	write_unlock_bh(&conn->chan_lock);
	bh_unlock_sock(parent);
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
	struct l2cap_info_req info;
	info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);
	mod_timer(&conn->info_timer, jiffies +
			msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
	l2cap_send_cmd(conn, conn->info_ident,
			L2CAP_INFO_REQ, sizeof(info), &info);
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			result == L2CAP_CR_SUCCESS) {
	set_bit(CONF_REQ_SENT, &chan->conf_state);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);
	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
			dcid, scid, result, status);
	chan = l2cap_get_chan_by_scid(conn, scid);
	chan = l2cap_get_chan_by_ident(conn, cmd->ident);
	case L2CAP_CR_SUCCESS:
	l2cap_state_change(chan, BT_CONFIG);
	clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, req), req);
	chan->num_conf_req++;
	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
	l2cap_state_change(chan, BT_DISCONN);
	__clear_chan_timer(chan);
	__set_chan_timer(chan, HZ / 5);
	l2cap_chan_del(chan, ECONNREFUSED);

static inline void set_default_fcs(struct l2cap_chan *chan)
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it. */
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2507 chan
->fcs
= L2CAP_FCS_NONE
;
2508 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
2509 chan
->fcs
= L2CAP_FCS_CRC16
;
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
						u16 cmd_len, u8 *data)
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	struct l2cap_chan *chan;
	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);
	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
	struct l2cap_cmd_rej_cid rej;
	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	rej.scid = cpu_to_le16(chan->scid);
	rej.dcid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			l2cap_build_conf_rsp(chan, rsp,
			L2CAP_CONF_REJECT, flags), rsp);
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;
	if (flags & 0x0001) {
	/* Incomplete config. Send empty response. */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			l2cap_build_conf_rsp(chan, rsp,
			L2CAP_CONF_SUCCESS, 0x0001), rsp);
	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	l2cap_send_disconn_req(conn, chan, ECONNRESET);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;
	/* Reset config buffer. */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
	set_default_fcs(chan);
	l2cap_state_change(chan, BT_CONNECTED);
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	skb_queue_head_init(&chan->tx_q);
	if (chan->mode == L2CAP_MODE_ERTM)
	l2cap_ertm_init(chan);
	l2cap_chan_ready(sk);
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	struct sock *sk;
	int len = cmd->len - sizeof(*rsp);

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		break;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

	default:
		sk->sk_err = ECONNRESET;
		__set_chan_timer(chan, HZ * 5);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}

static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
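/*
 * Example exchange: a remote Information Request of type
 * L2CAP_IT_FEAT_MASK is answered with an 8-byte Information Response
 * (type + result + 32-bit feature mask); the mask always advertises
 * L2CAP_FEAT_FIXED_CHAN and, unless ERTM is disabled via the module
 * parameter, the ERTM, streaming and FCS bits as well.  A request for
 * L2CAP_IT_FIXED_CHAN gets the 8-byte fixed-channel bitmap, and any
 * other type is answered with L2CAP_IR_NOTSUPP and no payload.
 */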
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
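/*
 * Worked example (values as carried on the wire: interval in 1.25 ms
 * steps, timeout in 10 ms steps): min = 24, max = 40, latency = 0,
 * to_multiplier = 600 is accepted, since max_latency =
 * (600 * 8 / 40) - 1 = 119 and the requested latency 0 is below both
 * 499 and 119.  By contrast min = 80, max = 100, to_multiplier = 10 is
 * rejected by the "max >= to_multiplier * 8" test (100 >= 80): the
 * maximum connection interval (100 * 1.25 ms = 125 ms) would not be
 * shorter than the supervision timeout (10 * 10 ms = 100 ms).
 */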
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
					min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
						sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}

static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}

static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		return 0;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		return l2cap_conn_param_update_req(conn, cmd, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		return 0;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		return -EINVAL;
	}
}
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
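/*
 * One signaling C-frame may carry several commands back to back: the
 * loop above peels off one 4-byte l2cap_cmd_hdr at a time, hands the
 * cmd_len bytes that follow to the BR/EDR or LE dispatcher depending on
 * the link type, and then advances data/len past the command so the next
 * iteration sees the following header.  Any handler error is answered
 * with a Command Reject carrying L2CAP_REJ_NOT_UNDERSTOOD.
 */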
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
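/*
 * By the time this runs the caller has already pulled the 2-byte control
 * field, so skb->data points at the payload and hdr_size (basic L2CAP
 * header + control = 6 bytes) reaches back over everything the peer
 * protected.  The 2-byte FCS is trimmed off the tail, crc16() is run
 * from skb->data - hdr_size over header, control and payload, and the
 * result is compared against the trimmed bytes.
 */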
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
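/*
 * Ordering in the SREJ queue is by distance from buffer_seq modulo 64.
 * Example: with buffer_seq = 60, an incoming tx_seq = 2 gives
 * (2 - 60) % 64 = -58, corrected to 6, so it sorts after a queued frame
 * with tx_seq = 62 (offset 2) and before one with tx_seq = 5 (offset 9);
 * an exact duplicate tx_seq is refused with -EINVAL.
 */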
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int err = -EINVAL;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SDU_START:
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SDU_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
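/*
 * SAR example: a 1000-byte SDU sent with an MPS of 672 arrives as a
 * Start PDU (2-byte SDU length field plus the first chunk) followed by
 * an End PDU with the remainder.  The Start PDU becomes chan->sdu, the
 * End PDU is chained onto it with append_skb_frag(), and only when the
 * accumulated length matches sdu_len is the reassembled buffer handed to
 * chan->ops->recv(); a length mismatch or an SDU larger than the
 * incoming MTU tears the partial reassembly down instead.
 */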
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, control);

	set_bit(CONN_RNR_SENT, &chan->conn_state);

	__clear_ack_timer(chan);
}

static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}

void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	if (chan->mode == L2CAP_MODE_ERTM) {
		if (busy)
			l2cap_ertm_enter_local_busy(chan);
		else
			l2cap_ertm_exit_local_busy(chan);
	}
}
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}

static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *new;
	u16 control;

	while (tx_seq != chan->expected_tx_seq) {
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);

		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		new->tx_seq = chan->expected_tx_seq;
		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		list_add_tail(&new->list, &chan->srej_l);
	}

	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
}
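/*
 * Example: with expected_tx_seq = 10 and an arriving tx_seq = 13, the
 * loop sends SREJ frames asking for 10, 11 and 12, queues those three
 * sequence numbers on srej_l, and finally bumps expected_tx_seq to 14 so
 * the frame that exposed the gap becomes the new "expected" point.
 */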
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		goto drop;

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		set_bit(CONN_SEND_PBIT, &chan->conn_state);

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}

static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}

static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (rx_control & L2CAP_CTRL_POLL) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}

static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		__clear_retrans_timer(chan);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}

static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		if (len != 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
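/*
 * Length accounting above: the 2-byte control field has already been
 * pulled, so len starts as the remaining payload; 2 more bytes are
 * subtracted for the SDU length field of a SAR-start I-frame and another
 * 2 when a CRC16 FCS is in use.  What remains must not exceed the
 * negotiated MPS, must not be negative for an I-frame, and must be
 * exactly 0 for an S-frame; otherwise the channel is torn down with
 * ECONNRESET.  An FCS failure simply drops the frame and lets ERTM
 * recovery ask for retransmission.
 */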
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = (tx_seq + 1) % 64;

		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}

static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
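/*
 * CID dispatch summary: L2CAP_CID_SIGNALING and L2CAP_CID_LE_SIGNALING
 * go to l2cap_sig_channel(), L2CAP_CID_CONN_LESS is routed by the PSM
 * read from the payload, L2CAP_CID_LE_DATA (ATT) is matched against
 * sockets bound to that fixed CID, the SMP CID is handed to
 * smp_sig_channel() (failure tears the connection down with EACCES),
 * and every other CID is treated as a connection-oriented data channel.
 */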
/* ---- L2CAP interface with lower layer (HCI) ---- */

static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}

static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
		return -EINVAL;

	if (!status) {
		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else
		l2cap_conn_del(hcon, bt_to_errno(status));

	return 0;
}

static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
		return 0x13;

	return conn->disc_reason;
}

static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
		return -EINVAL;

	l2cap_conn_del(hcon, bt_to_errno(reason));

	return 0;
}

static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__clear_chan_timer(chan);
			__set_chan_timer(chan, HZ * 5);
		} else if (chan->sec_level == BT_SECURITY_HIGH)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		smp_distribute_keys(conn, 0);
		del_timer(&conn->security_timer);
	}

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(sk);
			}

			bh_unlock_sock(sk);
			continue;
		}

		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm  = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				set_bit(CONF_CONNECT_PEND, &chan->conf_state);

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				__clear_chan_timer(chan);
				__set_chan_timer(chan, HZ / 10);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, HZ / 10);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
						"MTU %d)", len, chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
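/*
 * Reassembly example: an L2CAP frame whose basic header announces a
 * 1021-byte payload arrives as one ACL start packet plus continuation
 * packets.  The start packet supplies the header, so the target length
 * is hdr->len + L2CAP_HDR_SIZE = 1025; rx_skb is allocated at that size,
 * each fragment is copied in with skb_copy_from_linear_data(), and
 * rx_len counts down until it reaches zero, at which point the completed
 * frame is pushed through l2cap_recv_frame().  Oversized or out-of-order
 * fragments mark the connection unreliable (ECOMM) and the partial frame
 * is discarded.
 */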
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock_bh(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					c->state, __le16_to_cpu(c->psm),
					c->scid, c->dcid, c->imtu, c->omtu,
					c->sec_level, c->mode);
	}

	read_unlock_bh(&chan_list_lock);

	return 0;
}

static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;

static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};

int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;

error:
	l2cap_cleanup_sockets();
	return err;
}

void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");