2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
50 static LIST_HEAD(chan_list
);
51 static DEFINE_RWLOCK(chan_list_lock
);
53 static u16 le_max_credits
= L2CAP_LE_MAX_CREDITS
;
54 static u16 le_default_mps
= L2CAP_LE_DEFAULT_MPS
;
56 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
57 u8 code
, u8 ident
, u16 dlen
, void *data
);
58 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
60 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
61 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
63 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
64 struct sk_buff_head
*skbs
, u8 event
);
66 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
68 if (hcon
->type
== LE_LINK
) {
69 if (type
== ADDR_LE_DEV_PUBLIC
)
70 return BDADDR_LE_PUBLIC
;
72 return BDADDR_LE_RANDOM
;
78 /* ---- L2CAP channels ---- */
80 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
85 list_for_each_entry(c
, &conn
->chan_l
, list
) {
92 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
97 list_for_each_entry(c
, &conn
->chan_l
, list
) {
104 /* Find channel with given SCID.
105 * Returns locked channel. */
106 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
109 struct l2cap_chan
*c
;
111 mutex_lock(&conn
->chan_lock
);
112 c
= __l2cap_get_chan_by_scid(conn
, cid
);
115 mutex_unlock(&conn
->chan_lock
);
120 /* Find channel with given DCID.
121 * Returns locked channel.
123 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
126 struct l2cap_chan
*c
;
128 mutex_lock(&conn
->chan_lock
);
129 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
132 mutex_unlock(&conn
->chan_lock
);
137 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
140 struct l2cap_chan
*c
;
142 list_for_each_entry(c
, &conn
->chan_l
, list
) {
143 if (c
->ident
== ident
)
149 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
152 struct l2cap_chan
*c
;
154 mutex_lock(&conn
->chan_lock
);
155 c
= __l2cap_get_chan_by_ident(conn
, ident
);
158 mutex_unlock(&conn
->chan_lock
);
163 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
165 struct l2cap_chan
*c
;
167 list_for_each_entry(c
, &chan_list
, global_l
) {
168 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
174 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
178 write_lock(&chan_list_lock
);
180 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
193 for (p
= 0x1001; p
< 0x1100; p
+= 2)
194 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
195 chan
->psm
= cpu_to_le16(p
);
196 chan
->sport
= cpu_to_le16(p
);
203 write_unlock(&chan_list_lock
);
206 EXPORT_SYMBOL_GPL(l2cap_add_psm
);
208 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
210 write_lock(&chan_list_lock
);
212 /* Override the defaults (which are for conn-oriented) */
213 chan
->omtu
= L2CAP_DEFAULT_MTU
;
214 chan
->chan_type
= L2CAP_CHAN_FIXED
;
218 write_unlock(&chan_list_lock
);
223 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
227 if (conn
->hcon
->type
== LE_LINK
)
228 dyn_end
= L2CAP_CID_LE_DYN_END
;
230 dyn_end
= L2CAP_CID_DYN_END
;
232 for (cid
= L2CAP_CID_DYN_START
; cid
< dyn_end
; cid
++) {
233 if (!__l2cap_get_chan_by_scid(conn
, cid
))
240 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
242 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
243 state_to_string(state
));
246 chan
->ops
->state_change(chan
, state
, 0);
249 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
253 chan
->ops
->state_change(chan
, chan
->state
, err
);
256 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
258 chan
->ops
->state_change(chan
, chan
->state
, err
);
261 static void __set_retrans_timer(struct l2cap_chan
*chan
)
263 if (!delayed_work_pending(&chan
->monitor_timer
) &&
264 chan
->retrans_timeout
) {
265 l2cap_set_timer(chan
, &chan
->retrans_timer
,
266 msecs_to_jiffies(chan
->retrans_timeout
));
270 static void __set_monitor_timer(struct l2cap_chan
*chan
)
272 __clear_retrans_timer(chan
);
273 if (chan
->monitor_timeout
) {
274 l2cap_set_timer(chan
, &chan
->monitor_timer
,
275 msecs_to_jiffies(chan
->monitor_timeout
));
279 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
284 skb_queue_walk(head
, skb
) {
285 if (bt_cb(skb
)->control
.txseq
== seq
)
292 /* ---- L2CAP sequence number lists ---- */
294 /* For ERTM, ordered lists of sequence numbers must be tracked for
295 * SREJ requests that are received and for frames that are to be
296 * retransmitted. These seq_list functions implement a singly-linked
297 * list in an array, where membership in the list can also be checked
298 * in constant time. Items can also be added to the tail of the list
299 * and removed from the head in constant time, without further memory
303 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
305 size_t alloc_size
, i
;
307 /* Allocated size is a power of 2 to map sequence numbers
308 * (which may be up to 14 bits) in to a smaller array that is
309 * sized for the negotiated ERTM transmit windows.
311 alloc_size
= roundup_pow_of_two(size
);
313 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
317 seq_list
->mask
= alloc_size
- 1;
318 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
319 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
320 for (i
= 0; i
< alloc_size
; i
++)
321 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
326 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
328 kfree(seq_list
->list
);
331 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
334 /* Constant-time check for list membership */
335 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
338 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
340 u16 seq
= seq_list
->head
;
341 u16 mask
= seq_list
->mask
;
343 seq_list
->head
= seq_list
->list
[seq
& mask
];
344 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
346 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
347 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
348 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
354 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
358 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
361 for (i
= 0; i
<= seq_list
->mask
; i
++)
362 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
364 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
365 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
368 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
370 u16 mask
= seq_list
->mask
;
372 /* All appends happen in constant time */
374 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
377 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
378 seq_list
->head
= seq
;
380 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
382 seq_list
->tail
= seq
;
383 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
386 static void l2cap_chan_timeout(struct work_struct
*work
)
388 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
390 struct l2cap_conn
*conn
= chan
->conn
;
393 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
395 mutex_lock(&conn
->chan_lock
);
396 l2cap_chan_lock(chan
);
398 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
399 reason
= ECONNREFUSED
;
400 else if (chan
->state
== BT_CONNECT
&&
401 chan
->sec_level
!= BT_SECURITY_SDP
)
402 reason
= ECONNREFUSED
;
406 l2cap_chan_close(chan
, reason
);
408 l2cap_chan_unlock(chan
);
410 chan
->ops
->close(chan
);
411 mutex_unlock(&conn
->chan_lock
);
413 l2cap_chan_put(chan
);
416 struct l2cap_chan
*l2cap_chan_create(void)
418 struct l2cap_chan
*chan
;
420 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
424 mutex_init(&chan
->lock
);
426 /* Set default lock nesting level */
427 atomic_set(&chan
->nesting
, L2CAP_NESTING_NORMAL
);
429 write_lock(&chan_list_lock
);
430 list_add(&chan
->global_l
, &chan_list
);
431 write_unlock(&chan_list_lock
);
433 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
435 chan
->state
= BT_OPEN
;
437 kref_init(&chan
->kref
);
439 /* This flag is cleared in l2cap_chan_ready() */
440 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
442 BT_DBG("chan %p", chan
);
446 EXPORT_SYMBOL_GPL(l2cap_chan_create
);
448 static void l2cap_chan_destroy(struct kref
*kref
)
450 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
452 BT_DBG("chan %p", chan
);
454 write_lock(&chan_list_lock
);
455 list_del(&chan
->global_l
);
456 write_unlock(&chan_list_lock
);
461 void l2cap_chan_hold(struct l2cap_chan
*c
)
463 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
468 void l2cap_chan_put(struct l2cap_chan
*c
)
470 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
472 kref_put(&c
->kref
, l2cap_chan_destroy
);
474 EXPORT_SYMBOL_GPL(l2cap_chan_put
);
476 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
478 chan
->fcs
= L2CAP_FCS_CRC16
;
479 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
480 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
481 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
482 chan
->remote_max_tx
= chan
->max_tx
;
483 chan
->remote_tx_win
= chan
->tx_win
;
484 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
485 chan
->sec_level
= BT_SECURITY_LOW
;
486 chan
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
487 chan
->retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
488 chan
->monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
489 chan
->conf_state
= 0;
491 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
493 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults
);
495 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
498 chan
->sdu_last_frag
= NULL
;
500 chan
->tx_credits
= 0;
501 chan
->rx_credits
= le_max_credits
;
502 chan
->mps
= min_t(u16
, chan
->imtu
, le_default_mps
);
504 skb_queue_head_init(&chan
->tx_q
);
507 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
509 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
510 __le16_to_cpu(chan
->psm
), chan
->dcid
);
512 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
516 switch (chan
->chan_type
) {
517 case L2CAP_CHAN_CONN_ORIENTED
:
518 /* Alloc CID for connection-oriented socket */
519 chan
->scid
= l2cap_alloc_cid(conn
);
520 if (conn
->hcon
->type
== ACL_LINK
)
521 chan
->omtu
= L2CAP_DEFAULT_MTU
;
524 case L2CAP_CHAN_CONN_LESS
:
525 /* Connectionless socket */
526 chan
->scid
= L2CAP_CID_CONN_LESS
;
527 chan
->dcid
= L2CAP_CID_CONN_LESS
;
528 chan
->omtu
= L2CAP_DEFAULT_MTU
;
531 case L2CAP_CHAN_FIXED
:
532 /* Caller will set CID and CID specific MTU values */
536 /* Raw socket can send/recv signalling messages only */
537 chan
->scid
= L2CAP_CID_SIGNALING
;
538 chan
->dcid
= L2CAP_CID_SIGNALING
;
539 chan
->omtu
= L2CAP_DEFAULT_MTU
;
542 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
543 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
544 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
545 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
546 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
547 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
549 l2cap_chan_hold(chan
);
551 /* Only keep a reference for fixed channels if they requested it */
552 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
553 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
554 hci_conn_hold(conn
->hcon
);
556 list_add(&chan
->list
, &conn
->chan_l
);
559 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
561 mutex_lock(&conn
->chan_lock
);
562 __l2cap_chan_add(conn
, chan
);
563 mutex_unlock(&conn
->chan_lock
);
566 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
568 struct l2cap_conn
*conn
= chan
->conn
;
570 __clear_chan_timer(chan
);
572 BT_DBG("chan %p, conn %p, err %d, state %s", chan
, conn
, err
,
573 state_to_string(chan
->state
));
575 chan
->ops
->teardown(chan
, err
);
578 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
579 /* Delete from channel list */
580 list_del(&chan
->list
);
582 l2cap_chan_put(chan
);
586 /* Reference was only held for non-fixed channels or
587 * fixed channels that explicitly requested it using the
588 * FLAG_HOLD_HCI_CONN flag.
590 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
591 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
592 hci_conn_drop(conn
->hcon
);
594 if (mgr
&& mgr
->bredr_chan
== chan
)
595 mgr
->bredr_chan
= NULL
;
598 if (chan
->hs_hchan
) {
599 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
601 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
602 amp_disconnect_logical_link(hs_hchan
);
605 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
609 case L2CAP_MODE_BASIC
:
612 case L2CAP_MODE_LE_FLOWCTL
:
613 skb_queue_purge(&chan
->tx_q
);
616 case L2CAP_MODE_ERTM
:
617 __clear_retrans_timer(chan
);
618 __clear_monitor_timer(chan
);
619 __clear_ack_timer(chan
);
621 skb_queue_purge(&chan
->srej_q
);
623 l2cap_seq_list_free(&chan
->srej_list
);
624 l2cap_seq_list_free(&chan
->retrans_list
);
628 case L2CAP_MODE_STREAMING
:
629 skb_queue_purge(&chan
->tx_q
);
635 EXPORT_SYMBOL_GPL(l2cap_chan_del
);
637 static void l2cap_conn_update_id_addr(struct work_struct
*work
)
639 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
640 id_addr_update_work
);
641 struct hci_conn
*hcon
= conn
->hcon
;
642 struct l2cap_chan
*chan
;
644 mutex_lock(&conn
->chan_lock
);
646 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
647 l2cap_chan_lock(chan
);
648 bacpy(&chan
->dst
, &hcon
->dst
);
649 chan
->dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
650 l2cap_chan_unlock(chan
);
653 mutex_unlock(&conn
->chan_lock
);
656 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
658 struct l2cap_conn
*conn
= chan
->conn
;
659 struct l2cap_le_conn_rsp rsp
;
662 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
663 result
= L2CAP_CR_AUTHORIZATION
;
665 result
= L2CAP_CR_BAD_PSM
;
667 l2cap_state_change(chan
, BT_DISCONN
);
669 rsp
.dcid
= cpu_to_le16(chan
->scid
);
670 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
671 rsp
.mps
= cpu_to_le16(chan
->mps
);
672 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
673 rsp
.result
= cpu_to_le16(result
);
675 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
679 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
681 struct l2cap_conn
*conn
= chan
->conn
;
682 struct l2cap_conn_rsp rsp
;
685 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
686 result
= L2CAP_CR_SEC_BLOCK
;
688 result
= L2CAP_CR_BAD_PSM
;
690 l2cap_state_change(chan
, BT_DISCONN
);
692 rsp
.scid
= cpu_to_le16(chan
->dcid
);
693 rsp
.dcid
= cpu_to_le16(chan
->scid
);
694 rsp
.result
= cpu_to_le16(result
);
695 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
697 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
700 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
702 struct l2cap_conn
*conn
= chan
->conn
;
704 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
706 switch (chan
->state
) {
708 chan
->ops
->teardown(chan
, 0);
713 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
714 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
715 l2cap_send_disconn_req(chan
, reason
);
717 l2cap_chan_del(chan
, reason
);
721 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
722 if (conn
->hcon
->type
== ACL_LINK
)
723 l2cap_chan_connect_reject(chan
);
724 else if (conn
->hcon
->type
== LE_LINK
)
725 l2cap_chan_le_connect_reject(chan
);
728 l2cap_chan_del(chan
, reason
);
733 l2cap_chan_del(chan
, reason
);
737 chan
->ops
->teardown(chan
, 0);
741 EXPORT_SYMBOL(l2cap_chan_close
);
743 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
745 switch (chan
->chan_type
) {
747 switch (chan
->sec_level
) {
748 case BT_SECURITY_HIGH
:
749 case BT_SECURITY_FIPS
:
750 return HCI_AT_DEDICATED_BONDING_MITM
;
751 case BT_SECURITY_MEDIUM
:
752 return HCI_AT_DEDICATED_BONDING
;
754 return HCI_AT_NO_BONDING
;
757 case L2CAP_CHAN_CONN_LESS
:
758 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_3DSP
)) {
759 if (chan
->sec_level
== BT_SECURITY_LOW
)
760 chan
->sec_level
= BT_SECURITY_SDP
;
762 if (chan
->sec_level
== BT_SECURITY_HIGH
||
763 chan
->sec_level
== BT_SECURITY_FIPS
)
764 return HCI_AT_NO_BONDING_MITM
;
766 return HCI_AT_NO_BONDING
;
768 case L2CAP_CHAN_CONN_ORIENTED
:
769 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_SDP
)) {
770 if (chan
->sec_level
== BT_SECURITY_LOW
)
771 chan
->sec_level
= BT_SECURITY_SDP
;
773 if (chan
->sec_level
== BT_SECURITY_HIGH
||
774 chan
->sec_level
== BT_SECURITY_FIPS
)
775 return HCI_AT_NO_BONDING_MITM
;
777 return HCI_AT_NO_BONDING
;
781 switch (chan
->sec_level
) {
782 case BT_SECURITY_HIGH
:
783 case BT_SECURITY_FIPS
:
784 return HCI_AT_GENERAL_BONDING_MITM
;
785 case BT_SECURITY_MEDIUM
:
786 return HCI_AT_GENERAL_BONDING
;
788 return HCI_AT_NO_BONDING
;
794 /* Service level security */
795 int l2cap_chan_check_security(struct l2cap_chan
*chan
, bool initiator
)
797 struct l2cap_conn
*conn
= chan
->conn
;
800 if (conn
->hcon
->type
== LE_LINK
)
801 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
803 auth_type
= l2cap_get_auth_type(chan
);
805 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
,
809 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
813 /* Get next available identificator.
814 * 1 - 128 are used by kernel.
815 * 129 - 199 are reserved.
816 * 200 - 254 are used by utilities like l2ping, etc.
819 mutex_lock(&conn
->ident_lock
);
821 if (++conn
->tx_ident
> 128)
826 mutex_unlock(&conn
->ident_lock
);
831 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
834 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
837 BT_DBG("code 0x%2.2x", code
);
842 /* Use NO_FLUSH if supported or we have an LE link (which does
843 * not support auto-flushing packets) */
844 if (lmp_no_flush_capable(conn
->hcon
->hdev
) ||
845 conn
->hcon
->type
== LE_LINK
)
846 flags
= ACL_START_NO_FLUSH
;
850 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
851 skb
->priority
= HCI_PRIO_MAX
;
853 hci_send_acl(conn
->hchan
, skb
, flags
);
856 static bool __chan_is_moving(struct l2cap_chan
*chan
)
858 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
859 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
862 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
864 struct hci_conn
*hcon
= chan
->conn
->hcon
;
867 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
870 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
872 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
879 /* Use NO_FLUSH for LE links (where this is the only option) or
880 * if the BR/EDR link supports it and flushing has not been
881 * explicitly requested (through FLAG_FLUSHABLE).
883 if (hcon
->type
== LE_LINK
||
884 (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
885 lmp_no_flush_capable(hcon
->hdev
)))
886 flags
= ACL_START_NO_FLUSH
;
890 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
891 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
894 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
896 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
897 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
899 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
902 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
903 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
910 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
911 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
918 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
920 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
921 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
923 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
926 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
927 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
934 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
935 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
942 static inline void __unpack_control(struct l2cap_chan
*chan
,
945 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
946 __unpack_extended_control(get_unaligned_le32(skb
->data
),
947 &bt_cb(skb
)->control
);
948 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
950 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
951 &bt_cb(skb
)->control
);
952 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
956 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
960 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
961 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
963 if (control
->sframe
) {
964 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
965 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
966 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
968 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
969 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
975 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
979 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
980 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
982 if (control
->sframe
) {
983 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
984 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
985 packed
|= L2CAP_CTRL_FRAME_TYPE
;
987 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
988 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
994 static inline void __pack_control(struct l2cap_chan
*chan
,
995 struct l2cap_ctrl
*control
,
998 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
999 put_unaligned_le32(__pack_extended_control(control
),
1000 skb
->data
+ L2CAP_HDR_SIZE
);
1002 put_unaligned_le16(__pack_enhanced_control(control
),
1003 skb
->data
+ L2CAP_HDR_SIZE
);
1007 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
1009 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1010 return L2CAP_EXT_HDR_SIZE
;
1012 return L2CAP_ENH_HDR_SIZE
;
1015 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
1018 struct sk_buff
*skb
;
1019 struct l2cap_hdr
*lh
;
1020 int hlen
= __ertm_hdr_size(chan
);
1022 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1023 hlen
+= L2CAP_FCS_SIZE
;
1025 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1028 return ERR_PTR(-ENOMEM
);
1030 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1031 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1032 lh
->cid
= cpu_to_le16(chan
->dcid
);
1034 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1035 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1037 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1039 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1040 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1041 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1044 skb
->priority
= HCI_PRIO_MAX
;
1048 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1049 struct l2cap_ctrl
*control
)
1051 struct sk_buff
*skb
;
1054 BT_DBG("chan %p, control %p", chan
, control
);
1056 if (!control
->sframe
)
1059 if (__chan_is_moving(chan
))
1062 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1066 if (control
->super
== L2CAP_SUPER_RR
)
1067 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1068 else if (control
->super
== L2CAP_SUPER_RNR
)
1069 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1071 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1072 chan
->last_acked_seq
= control
->reqseq
;
1073 __clear_ack_timer(chan
);
1076 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1077 control
->final
, control
->poll
, control
->super
);
1079 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1080 control_field
= __pack_extended_control(control
);
1082 control_field
= __pack_enhanced_control(control
);
1084 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1086 l2cap_do_send(chan
, skb
);
1089 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1091 struct l2cap_ctrl control
;
1093 BT_DBG("chan %p, poll %d", chan
, poll
);
1095 memset(&control
, 0, sizeof(control
));
1097 control
.poll
= poll
;
1099 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1100 control
.super
= L2CAP_SUPER_RNR
;
1102 control
.super
= L2CAP_SUPER_RR
;
1104 control
.reqseq
= chan
->buffer_seq
;
1105 l2cap_send_sframe(chan
, &control
);
1108 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1110 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
1113 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1116 static bool __amp_capable(struct l2cap_chan
*chan
)
1118 struct l2cap_conn
*conn
= chan
->conn
;
1119 struct hci_dev
*hdev
;
1120 bool amp_available
= false;
1122 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
1125 if (!(conn
->remote_fixed_chan
& L2CAP_FC_A2MP
))
1128 read_lock(&hci_dev_list_lock
);
1129 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1130 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1131 test_bit(HCI_UP
, &hdev
->flags
)) {
1132 amp_available
= true;
1136 read_unlock(&hci_dev_list_lock
);
1138 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1139 return amp_available
;
1144 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1146 /* Check EFS parameters */
1150 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1152 struct l2cap_conn
*conn
= chan
->conn
;
1153 struct l2cap_conn_req req
;
1155 req
.scid
= cpu_to_le16(chan
->scid
);
1156 req
.psm
= chan
->psm
;
1158 chan
->ident
= l2cap_get_ident(conn
);
1160 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1162 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1165 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1167 struct l2cap_create_chan_req req
;
1168 req
.scid
= cpu_to_le16(chan
->scid
);
1169 req
.psm
= chan
->psm
;
1170 req
.amp_id
= amp_id
;
1172 chan
->ident
= l2cap_get_ident(chan
->conn
);
1174 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1178 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1180 struct sk_buff
*skb
;
1182 BT_DBG("chan %p", chan
);
1184 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1187 __clear_retrans_timer(chan
);
1188 __clear_monitor_timer(chan
);
1189 __clear_ack_timer(chan
);
1191 chan
->retry_count
= 0;
1192 skb_queue_walk(&chan
->tx_q
, skb
) {
1193 if (bt_cb(skb
)->control
.retries
)
1194 bt_cb(skb
)->control
.retries
= 1;
1199 chan
->expected_tx_seq
= chan
->buffer_seq
;
1201 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1202 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1203 l2cap_seq_list_clear(&chan
->retrans_list
);
1204 l2cap_seq_list_clear(&chan
->srej_list
);
1205 skb_queue_purge(&chan
->srej_q
);
1207 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1208 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1210 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1213 static void l2cap_move_done(struct l2cap_chan
*chan
)
1215 u8 move_role
= chan
->move_role
;
1216 BT_DBG("chan %p", chan
);
1218 chan
->move_state
= L2CAP_MOVE_STABLE
;
1219 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1221 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1224 switch (move_role
) {
1225 case L2CAP_MOVE_ROLE_INITIATOR
:
1226 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1227 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1229 case L2CAP_MOVE_ROLE_RESPONDER
:
1230 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1235 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1237 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1238 chan
->conf_state
= 0;
1239 __clear_chan_timer(chan
);
1241 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1242 chan
->ops
->suspend(chan
);
1244 chan
->state
= BT_CONNECTED
;
1246 chan
->ops
->ready(chan
);
1249 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1251 struct l2cap_conn
*conn
= chan
->conn
;
1252 struct l2cap_le_conn_req req
;
1254 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1257 req
.psm
= chan
->psm
;
1258 req
.scid
= cpu_to_le16(chan
->scid
);
1259 req
.mtu
= cpu_to_le16(chan
->imtu
);
1260 req
.mps
= cpu_to_le16(chan
->mps
);
1261 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1263 chan
->ident
= l2cap_get_ident(conn
);
1265 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1269 static void l2cap_le_start(struct l2cap_chan
*chan
)
1271 struct l2cap_conn
*conn
= chan
->conn
;
1273 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1277 l2cap_chan_ready(chan
);
1281 if (chan
->state
== BT_CONNECT
)
1282 l2cap_le_connect(chan
);
1285 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1287 if (__amp_capable(chan
)) {
1288 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1289 a2mp_discover_amp(chan
);
1290 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1291 l2cap_le_start(chan
);
1293 l2cap_send_conn_req(chan
);
1297 static void l2cap_request_info(struct l2cap_conn
*conn
)
1299 struct l2cap_info_req req
;
1301 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1304 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1306 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1307 conn
->info_ident
= l2cap_get_ident(conn
);
1309 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1311 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1315 static void l2cap_do_start(struct l2cap_chan
*chan
)
1317 struct l2cap_conn
*conn
= chan
->conn
;
1319 if (conn
->hcon
->type
== LE_LINK
) {
1320 l2cap_le_start(chan
);
1324 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)) {
1325 l2cap_request_info(conn
);
1329 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1332 if (l2cap_chan_check_security(chan
, true) &&
1333 __l2cap_no_conn_pending(chan
))
1334 l2cap_start_connection(chan
);
1337 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1339 u32 local_feat_mask
= l2cap_feat_mask
;
1341 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1344 case L2CAP_MODE_ERTM
:
1345 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1346 case L2CAP_MODE_STREAMING
:
1347 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1353 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1355 struct l2cap_conn
*conn
= chan
->conn
;
1356 struct l2cap_disconn_req req
;
1361 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1362 __clear_retrans_timer(chan
);
1363 __clear_monitor_timer(chan
);
1364 __clear_ack_timer(chan
);
1367 if (chan
->scid
== L2CAP_CID_A2MP
) {
1368 l2cap_state_change(chan
, BT_DISCONN
);
1372 req
.dcid
= cpu_to_le16(chan
->dcid
);
1373 req
.scid
= cpu_to_le16(chan
->scid
);
1374 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1377 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1380 /* ---- L2CAP connections ---- */
1381 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1383 struct l2cap_chan
*chan
, *tmp
;
1385 BT_DBG("conn %p", conn
);
1387 mutex_lock(&conn
->chan_lock
);
1389 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1390 l2cap_chan_lock(chan
);
1392 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1393 l2cap_chan_ready(chan
);
1394 l2cap_chan_unlock(chan
);
1398 if (chan
->state
== BT_CONNECT
) {
1399 if (!l2cap_chan_check_security(chan
, true) ||
1400 !__l2cap_no_conn_pending(chan
)) {
1401 l2cap_chan_unlock(chan
);
1405 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1406 && test_bit(CONF_STATE2_DEVICE
,
1407 &chan
->conf_state
)) {
1408 l2cap_chan_close(chan
, ECONNRESET
);
1409 l2cap_chan_unlock(chan
);
1413 l2cap_start_connection(chan
);
1415 } else if (chan
->state
== BT_CONNECT2
) {
1416 struct l2cap_conn_rsp rsp
;
1418 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1419 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1421 if (l2cap_chan_check_security(chan
, false)) {
1422 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1423 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1424 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1425 chan
->ops
->defer(chan
);
1428 l2cap_state_change(chan
, BT_CONFIG
);
1429 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1430 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1433 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1434 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1437 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1440 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1441 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1442 l2cap_chan_unlock(chan
);
1446 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1447 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1448 l2cap_build_conf_req(chan
, buf
), buf
);
1449 chan
->num_conf_req
++;
1452 l2cap_chan_unlock(chan
);
1455 mutex_unlock(&conn
->chan_lock
);
1458 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1460 struct hci_conn
*hcon
= conn
->hcon
;
1461 struct hci_dev
*hdev
= hcon
->hdev
;
1463 BT_DBG("%s conn %p", hdev
->name
, conn
);
1465 /* For outgoing pairing which doesn't necessarily have an
1466 * associated socket (e.g. mgmt_pair_device).
1469 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1471 /* For LE slave connections, make sure the connection interval
1472 * is in the range of the minium and maximum interval that has
1473 * been configured for this connection. If not, then trigger
1474 * the connection update procedure.
1476 if (hcon
->role
== HCI_ROLE_SLAVE
&&
1477 (hcon
->le_conn_interval
< hcon
->le_conn_min_interval
||
1478 hcon
->le_conn_interval
> hcon
->le_conn_max_interval
)) {
1479 struct l2cap_conn_param_update_req req
;
1481 req
.min
= cpu_to_le16(hcon
->le_conn_min_interval
);
1482 req
.max
= cpu_to_le16(hcon
->le_conn_max_interval
);
1483 req
.latency
= cpu_to_le16(hcon
->le_conn_latency
);
1484 req
.to_multiplier
= cpu_to_le16(hcon
->le_supv_timeout
);
1486 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1487 L2CAP_CONN_PARAM_UPDATE_REQ
, sizeof(req
), &req
);
1491 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1493 struct l2cap_chan
*chan
;
1494 struct hci_conn
*hcon
= conn
->hcon
;
1496 BT_DBG("conn %p", conn
);
1498 if (hcon
->type
== ACL_LINK
)
1499 l2cap_request_info(conn
);
1501 mutex_lock(&conn
->chan_lock
);
1503 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1505 l2cap_chan_lock(chan
);
1507 if (chan
->scid
== L2CAP_CID_A2MP
) {
1508 l2cap_chan_unlock(chan
);
1512 if (hcon
->type
== LE_LINK
) {
1513 l2cap_le_start(chan
);
1514 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1515 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
1516 l2cap_chan_ready(chan
);
1517 } else if (chan
->state
== BT_CONNECT
) {
1518 l2cap_do_start(chan
);
1521 l2cap_chan_unlock(chan
);
1524 mutex_unlock(&conn
->chan_lock
);
1526 if (hcon
->type
== LE_LINK
)
1527 l2cap_le_conn_ready(conn
);
1529 queue_work(hcon
->hdev
->workqueue
, &conn
->pending_rx_work
);
1532 /* Notify sockets that we cannot guaranty reliability anymore */
1533 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1535 struct l2cap_chan
*chan
;
1537 BT_DBG("conn %p", conn
);
1539 mutex_lock(&conn
->chan_lock
);
1541 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1542 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1543 l2cap_chan_set_err(chan
, err
);
1546 mutex_unlock(&conn
->chan_lock
);
1549 static void l2cap_info_timeout(struct work_struct
*work
)
1551 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1554 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1555 conn
->info_ident
= 0;
1557 l2cap_conn_start(conn
);
1562 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1563 * callback is called during registration. The ->remove callback is called
1564 * during unregistration.
1565 * An l2cap_user object can either be explicitly unregistered or when the
1566 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1567 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1568 * External modules must own a reference to the l2cap_conn object if they intend
1569 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1570 * any time if they don't.
1573 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1575 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1578 /* We need to check whether l2cap_conn is registered. If it is not, we
1579 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1580 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1581 * relies on the parent hci_conn object to be locked. This itself relies
1582 * on the hci_dev object to be locked. So we must lock the hci device
1587 if (user
->list
.next
|| user
->list
.prev
) {
1592 /* conn->hchan is NULL after l2cap_conn_del() was called */
1598 ret
= user
->probe(conn
, user
);
1602 list_add(&user
->list
, &conn
->users
);
1606 hci_dev_unlock(hdev
);
1609 EXPORT_SYMBOL(l2cap_register_user
);
1611 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1613 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1617 if (!user
->list
.next
|| !user
->list
.prev
)
1620 list_del(&user
->list
);
1621 user
->list
.next
= NULL
;
1622 user
->list
.prev
= NULL
;
1623 user
->remove(conn
, user
);
1626 hci_dev_unlock(hdev
);
1628 EXPORT_SYMBOL(l2cap_unregister_user
);
1630 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1632 struct l2cap_user
*user
;
1634 while (!list_empty(&conn
->users
)) {
1635 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1636 list_del(&user
->list
);
1637 user
->list
.next
= NULL
;
1638 user
->list
.prev
= NULL
;
1639 user
->remove(conn
, user
);
1643 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1645 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1646 struct l2cap_chan
*chan
, *l
;
1651 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1653 kfree_skb(conn
->rx_skb
);
1655 skb_queue_purge(&conn
->pending_rx
);
1657 /* We can not call flush_work(&conn->pending_rx_work) here since we
1658 * might block if we are running on a worker from the same workqueue
1659 * pending_rx_work is waiting on.
1661 if (work_pending(&conn
->pending_rx_work
))
1662 cancel_work_sync(&conn
->pending_rx_work
);
1664 if (work_pending(&conn
->id_addr_update_work
))
1665 cancel_work_sync(&conn
->id_addr_update_work
);
1667 l2cap_unregister_all_users(conn
);
1669 /* Force the connection to be immediately dropped */
1670 hcon
->disc_timeout
= 0;
1672 mutex_lock(&conn
->chan_lock
);
1675 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1676 l2cap_chan_hold(chan
);
1677 l2cap_chan_lock(chan
);
1679 l2cap_chan_del(chan
, err
);
1681 l2cap_chan_unlock(chan
);
1683 chan
->ops
->close(chan
);
1684 l2cap_chan_put(chan
);
1687 mutex_unlock(&conn
->chan_lock
);
1689 hci_chan_del(conn
->hchan
);
1691 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1692 cancel_delayed_work_sync(&conn
->info_timer
);
1694 hcon
->l2cap_data
= NULL
;
1696 l2cap_conn_put(conn
);
1699 static void l2cap_conn_free(struct kref
*ref
)
1701 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1703 hci_conn_put(conn
->hcon
);
1707 struct l2cap_conn
*l2cap_conn_get(struct l2cap_conn
*conn
)
1709 kref_get(&conn
->ref
);
1712 EXPORT_SYMBOL(l2cap_conn_get
);
1714 void l2cap_conn_put(struct l2cap_conn
*conn
)
1716 kref_put(&conn
->ref
, l2cap_conn_free
);
1718 EXPORT_SYMBOL(l2cap_conn_put
);
1720 /* ---- Socket interface ---- */
1722 /* Find socket with psm and source / destination bdaddr.
1723 * Returns closest match.
1725 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1730 struct l2cap_chan
*c
, *c1
= NULL
;
1732 read_lock(&chan_list_lock
);
1734 list_for_each_entry(c
, &chan_list
, global_l
) {
1735 if (state
&& c
->state
!= state
)
1738 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1741 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1744 if (c
->psm
== psm
) {
1745 int src_match
, dst_match
;
1746 int src_any
, dst_any
;
1749 src_match
= !bacmp(&c
->src
, src
);
1750 dst_match
= !bacmp(&c
->dst
, dst
);
1751 if (src_match
&& dst_match
) {
1753 read_unlock(&chan_list_lock
);
1758 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1759 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1760 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1761 (src_any
&& dst_any
))
1767 l2cap_chan_hold(c1
);
1769 read_unlock(&chan_list_lock
);
1774 static void l2cap_monitor_timeout(struct work_struct
*work
)
1776 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1777 monitor_timer
.work
);
1779 BT_DBG("chan %p", chan
);
1781 l2cap_chan_lock(chan
);
1784 l2cap_chan_unlock(chan
);
1785 l2cap_chan_put(chan
);
1789 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1791 l2cap_chan_unlock(chan
);
1792 l2cap_chan_put(chan
);
1795 static void l2cap_retrans_timeout(struct work_struct
*work
)
1797 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1798 retrans_timer
.work
);
1800 BT_DBG("chan %p", chan
);
1802 l2cap_chan_lock(chan
);
1805 l2cap_chan_unlock(chan
);
1806 l2cap_chan_put(chan
);
1810 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1811 l2cap_chan_unlock(chan
);
1812 l2cap_chan_put(chan
);
1815 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1816 struct sk_buff_head
*skbs
)
1818 struct sk_buff
*skb
;
1819 struct l2cap_ctrl
*control
;
1821 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1823 if (__chan_is_moving(chan
))
1826 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1828 while (!skb_queue_empty(&chan
->tx_q
)) {
1830 skb
= skb_dequeue(&chan
->tx_q
);
1832 bt_cb(skb
)->control
.retries
= 1;
1833 control
= &bt_cb(skb
)->control
;
1835 control
->reqseq
= 0;
1836 control
->txseq
= chan
->next_tx_seq
;
1838 __pack_control(chan
, control
, skb
);
1840 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1841 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1842 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1845 l2cap_do_send(chan
, skb
);
1847 BT_DBG("Sent txseq %u", control
->txseq
);
1849 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1850 chan
->frames_sent
++;
1854 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1856 struct sk_buff
*skb
, *tx_skb
;
1857 struct l2cap_ctrl
*control
;
1860 BT_DBG("chan %p", chan
);
1862 if (chan
->state
!= BT_CONNECTED
)
1865 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1868 if (__chan_is_moving(chan
))
1871 while (chan
->tx_send_head
&&
1872 chan
->unacked_frames
< chan
->remote_tx_win
&&
1873 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1875 skb
= chan
->tx_send_head
;
1877 bt_cb(skb
)->control
.retries
= 1;
1878 control
= &bt_cb(skb
)->control
;
1880 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1883 control
->reqseq
= chan
->buffer_seq
;
1884 chan
->last_acked_seq
= chan
->buffer_seq
;
1885 control
->txseq
= chan
->next_tx_seq
;
1887 __pack_control(chan
, control
, skb
);
1889 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1890 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1891 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1894 /* Clone after data has been modified. Data is assumed to be
1895 read-only (for locking purposes) on cloned sk_buffs.
1897 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1902 __set_retrans_timer(chan
);
1904 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1905 chan
->unacked_frames
++;
1906 chan
->frames_sent
++;
1909 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1910 chan
->tx_send_head
= NULL
;
1912 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1914 l2cap_do_send(chan
, tx_skb
);
1915 BT_DBG("Sent txseq %u", control
->txseq
);
1918 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1919 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1924 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1926 struct l2cap_ctrl control
;
1927 struct sk_buff
*skb
;
1928 struct sk_buff
*tx_skb
;
1931 BT_DBG("chan %p", chan
);
1933 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1936 if (__chan_is_moving(chan
))
1939 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1940 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1942 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1944 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1949 bt_cb(skb
)->control
.retries
++;
1950 control
= bt_cb(skb
)->control
;
1952 if (chan
->max_tx
!= 0 &&
1953 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1954 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1955 l2cap_send_disconn_req(chan
, ECONNRESET
);
1956 l2cap_seq_list_clear(&chan
->retrans_list
);
1960 control
.reqseq
= chan
->buffer_seq
;
1961 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1966 if (skb_cloned(skb
)) {
1967 /* Cloned sk_buffs are read-only, so we need a
1970 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1972 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1976 l2cap_seq_list_clear(&chan
->retrans_list
);
1980 /* Update skb contents */
1981 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1982 put_unaligned_le32(__pack_extended_control(&control
),
1983 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1985 put_unaligned_le16(__pack_enhanced_control(&control
),
1986 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1990 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1991 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
,
1992 tx_skb
->len
- L2CAP_FCS_SIZE
);
1993 put_unaligned_le16(fcs
, skb_tail_pointer(tx_skb
) -
1997 l2cap_do_send(chan
, tx_skb
);
1999 BT_DBG("Resent txseq %d", control
.txseq
);
2001 chan
->last_acked_seq
= chan
->buffer_seq
;
2005 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2006 struct l2cap_ctrl
*control
)
2008 BT_DBG("chan %p, control %p", chan
, control
);
2010 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2011 l2cap_ertm_resend(chan
);
2014 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2015 struct l2cap_ctrl
*control
)
2017 struct sk_buff
*skb
;
2019 BT_DBG("chan %p, control %p", chan
, control
);
2022 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2024 l2cap_seq_list_clear(&chan
->retrans_list
);
2026 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2029 if (chan
->unacked_frames
) {
2030 skb_queue_walk(&chan
->tx_q
, skb
) {
2031 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2032 skb
== chan
->tx_send_head
)
2036 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2037 if (skb
== chan
->tx_send_head
)
2040 l2cap_seq_list_append(&chan
->retrans_list
,
2041 bt_cb(skb
)->control
.txseq
);
2044 l2cap_ertm_resend(chan
);
2048 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2050 struct l2cap_ctrl control
;
2051 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2052 chan
->last_acked_seq
);
2055 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2056 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2058 memset(&control
, 0, sizeof(control
));
2061 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2062 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2063 __clear_ack_timer(chan
);
2064 control
.super
= L2CAP_SUPER_RNR
;
2065 control
.reqseq
= chan
->buffer_seq
;
2066 l2cap_send_sframe(chan
, &control
);
2068 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2069 l2cap_ertm_send(chan
);
2070 /* If any i-frames were sent, they included an ack */
2071 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2075 /* Ack now if the window is 3/4ths full.
2076 * Calculate without mul or div
2078 threshold
= chan
->ack_win
;
2079 threshold
+= threshold
<< 1;
2082 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2085 if (frames_to_ack
>= threshold
) {
2086 __clear_ack_timer(chan
);
2087 control
.super
= L2CAP_SUPER_RR
;
2088 control
.reqseq
= chan
->buffer_seq
;
2089 l2cap_send_sframe(chan
, &control
);
2094 __set_ack_timer(chan
);
2098 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2099 struct msghdr
*msg
, int len
,
2100 int count
, struct sk_buff
*skb
)
2102 struct l2cap_conn
*conn
= chan
->conn
;
2103 struct sk_buff
**frag
;
2106 if (copy_from_iter(skb_put(skb
, count
), count
, &msg
->msg_iter
) != count
)
2112 /* Continuation fragments (no L2CAP header) */
2113 frag
= &skb_shinfo(skb
)->frag_list
;
2115 struct sk_buff
*tmp
;
2117 count
= min_t(unsigned int, conn
->mtu
, len
);
2119 tmp
= chan
->ops
->alloc_skb(chan
, 0, count
,
2120 msg
->msg_flags
& MSG_DONTWAIT
);
2122 return PTR_ERR(tmp
);
2126 if (copy_from_iter(skb_put(*frag
, count
), count
,
2127 &msg
->msg_iter
) != count
)
2133 skb
->len
+= (*frag
)->len
;
2134 skb
->data_len
+= (*frag
)->len
;
2136 frag
= &(*frag
)->next
;
2142 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2143 struct msghdr
*msg
, size_t len
)
2145 struct l2cap_conn
*conn
= chan
->conn
;
2146 struct sk_buff
*skb
;
2147 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2148 struct l2cap_hdr
*lh
;
2150 BT_DBG("chan %p psm 0x%2.2x len %zu", chan
,
2151 __le16_to_cpu(chan
->psm
), len
);
2153 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2155 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2156 msg
->msg_flags
& MSG_DONTWAIT
);
2160 /* Create L2CAP header */
2161 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2162 lh
->cid
= cpu_to_le16(chan
->dcid
);
2163 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2164 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2166 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2167 if (unlikely(err
< 0)) {
2169 return ERR_PTR(err
);
2174 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2175 struct msghdr
*msg
, size_t len
)
2177 struct l2cap_conn
*conn
= chan
->conn
;
2178 struct sk_buff
*skb
;
2180 struct l2cap_hdr
*lh
;
2182 BT_DBG("chan %p len %zu", chan
, len
);
2184 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2186 skb
= chan
->ops
->alloc_skb(chan
, L2CAP_HDR_SIZE
, count
,
2187 msg
->msg_flags
& MSG_DONTWAIT
);
2191 /* Create L2CAP header */
2192 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2193 lh
->cid
= cpu_to_le16(chan
->dcid
);
2194 lh
->len
= cpu_to_le16(len
);
2196 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2197 if (unlikely(err
< 0)) {
2199 return ERR_PTR(err
);
2204 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2205 struct msghdr
*msg
, size_t len
,
2208 struct l2cap_conn
*conn
= chan
->conn
;
2209 struct sk_buff
*skb
;
2210 int err
, count
, hlen
;
2211 struct l2cap_hdr
*lh
;
2213 BT_DBG("chan %p len %zu", chan
, len
);
2216 return ERR_PTR(-ENOTCONN
);
2218 hlen
= __ertm_hdr_size(chan
);
2221 hlen
+= L2CAP_SDULEN_SIZE
;
2223 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2224 hlen
+= L2CAP_FCS_SIZE
;
2226 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2228 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2229 msg
->msg_flags
& MSG_DONTWAIT
);
2233 /* Create L2CAP header */
2234 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2235 lh
->cid
= cpu_to_le16(chan
->dcid
);
2236 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2238 /* Control header is populated later */
2239 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2240 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2242 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2245 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2247 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2248 if (unlikely(err
< 0)) {
2250 return ERR_PTR(err
);
2253 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2254 bt_cb(skb
)->control
.retries
= 0;
2258 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2259 struct sk_buff_head
*seg_queue
,
2260 struct msghdr
*msg
, size_t len
)
2262 struct sk_buff
*skb
;
2267 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2269 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2270 * so fragmented skbs are not used. The HCI layer's handling
2271 * of fragmented skbs is not compatible with ERTM's queueing.
2274 /* PDU size is derived from the HCI MTU */
2275 pdu_len
= chan
->conn
->mtu
;
2277 /* Constrain PDU size for BR/EDR connections */
2279 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2281 /* Adjust for largest possible L2CAP overhead. */
2283 pdu_len
-= L2CAP_FCS_SIZE
;
2285 pdu_len
-= __ertm_hdr_size(chan
);
2287 /* Remote device may have requested smaller PDUs */
2288 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2290 if (len
<= pdu_len
) {
2291 sar
= L2CAP_SAR_UNSEGMENTED
;
2295 sar
= L2CAP_SAR_START
;
2300 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2303 __skb_queue_purge(seg_queue
);
2304 return PTR_ERR(skb
);
2307 bt_cb(skb
)->control
.sar
= sar
;
2308 __skb_queue_tail(seg_queue
, skb
);
2314 if (len
<= pdu_len
) {
2315 sar
= L2CAP_SAR_END
;
2318 sar
= L2CAP_SAR_CONTINUE
;
2325 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2327 size_t len
, u16 sdulen
)
2329 struct l2cap_conn
*conn
= chan
->conn
;
2330 struct sk_buff
*skb
;
2331 int err
, count
, hlen
;
2332 struct l2cap_hdr
*lh
;
2334 BT_DBG("chan %p len %zu", chan
, len
);
2337 return ERR_PTR(-ENOTCONN
);
2339 hlen
= L2CAP_HDR_SIZE
;
2342 hlen
+= L2CAP_SDULEN_SIZE
;
2344 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2346 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2347 msg
->msg_flags
& MSG_DONTWAIT
);
2351 /* Create L2CAP header */
2352 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2353 lh
->cid
= cpu_to_le16(chan
->dcid
);
2354 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2357 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2359 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2360 if (unlikely(err
< 0)) {
2362 return ERR_PTR(err
);
2368 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2369 struct sk_buff_head
*seg_queue
,
2370 struct msghdr
*msg
, size_t len
)
2372 struct sk_buff
*skb
;
2376 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2379 pdu_len
= chan
->remote_mps
- L2CAP_SDULEN_SIZE
;
2385 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2387 __skb_queue_purge(seg_queue
);
2388 return PTR_ERR(skb
);
2391 __skb_queue_tail(seg_queue
, skb
);
2397 pdu_len
+= L2CAP_SDULEN_SIZE
;
2404 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
2406 struct sk_buff
*skb
;
2408 struct sk_buff_head seg_queue
;
2413 /* Connectionless channel */
2414 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2415 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
2417 return PTR_ERR(skb
);
2419 /* Channel lock is released before requesting new skb and then
2420 * reacquired thus we need to recheck channel state.
2422 if (chan
->state
!= BT_CONNECTED
) {
2427 l2cap_do_send(chan
, skb
);
2431 switch (chan
->mode
) {
2432 case L2CAP_MODE_LE_FLOWCTL
:
2433 /* Check outgoing MTU */
2434 if (len
> chan
->omtu
)
2437 if (!chan
->tx_credits
)
2440 __skb_queue_head_init(&seg_queue
);
2442 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2444 if (chan
->state
!= BT_CONNECTED
) {
2445 __skb_queue_purge(&seg_queue
);
2452 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2454 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2455 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2459 if (!chan
->tx_credits
)
2460 chan
->ops
->suspend(chan
);
2466 case L2CAP_MODE_BASIC
:
2467 /* Check outgoing MTU */
2468 if (len
> chan
->omtu
)
2471 /* Create a basic PDU */
2472 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
2474 return PTR_ERR(skb
);
2476 /* Channel lock is released before requesting new skb and then
2477 * reacquired thus we need to recheck channel state.
2479 if (chan
->state
!= BT_CONNECTED
) {
2484 l2cap_do_send(chan
, skb
);
2488 case L2CAP_MODE_ERTM
:
2489 case L2CAP_MODE_STREAMING
:
2490 /* Check outgoing MTU */
2491 if (len
> chan
->omtu
) {
2496 __skb_queue_head_init(&seg_queue
);
2498 /* Do segmentation before calling in to the state machine,
2499 * since it's possible to block while waiting for memory
2502 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2504 /* The channel could have been closed while segmenting,
2505 * check that it is still connected.
2507 if (chan
->state
!= BT_CONNECTED
) {
2508 __skb_queue_purge(&seg_queue
);
2515 if (chan
->mode
== L2CAP_MODE_ERTM
)
2516 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2518 l2cap_streaming_send(chan
, &seg_queue
);
2522 /* If the skbs were not queued for sending, they'll still be in
2523 * seg_queue and need to be purged.
2525 __skb_queue_purge(&seg_queue
);
2529 BT_DBG("bad state %1.1x", chan
->mode
);
2535 EXPORT_SYMBOL_GPL(l2cap_chan_send
);
2537 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2539 struct l2cap_ctrl control
;
2542 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2544 memset(&control
, 0, sizeof(control
));
2546 control
.super
= L2CAP_SUPER_SREJ
;
2548 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2549 seq
= __next_seq(chan
, seq
)) {
2550 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2551 control
.reqseq
= seq
;
2552 l2cap_send_sframe(chan
, &control
);
2553 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2557 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2560 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2562 struct l2cap_ctrl control
;
2564 BT_DBG("chan %p", chan
);
2566 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2569 memset(&control
, 0, sizeof(control
));
2571 control
.super
= L2CAP_SUPER_SREJ
;
2572 control
.reqseq
= chan
->srej_list
.tail
;
2573 l2cap_send_sframe(chan
, &control
);
2576 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2578 struct l2cap_ctrl control
;
2582 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2584 memset(&control
, 0, sizeof(control
));
2586 control
.super
= L2CAP_SUPER_SREJ
;
2588 /* Capture initial list head to allow only one pass through the list. */
2589 initial_head
= chan
->srej_list
.head
;
2592 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2593 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2596 control
.reqseq
= seq
;
2597 l2cap_send_sframe(chan
, &control
);
2598 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2599 } while (chan
->srej_list
.head
!= initial_head
);
2602 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2604 struct sk_buff
*acked_skb
;
2607 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2609 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2612 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2613 chan
->expected_ack_seq
, chan
->unacked_frames
);
2615 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2616 ackseq
= __next_seq(chan
, ackseq
)) {
2618 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2620 skb_unlink(acked_skb
, &chan
->tx_q
);
2621 kfree_skb(acked_skb
);
2622 chan
->unacked_frames
--;
2626 chan
->expected_ack_seq
= reqseq
;
2628 if (chan
->unacked_frames
== 0)
2629 __clear_retrans_timer(chan
);
2631 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2634 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2636 BT_DBG("chan %p", chan
);
2638 chan
->expected_tx_seq
= chan
->buffer_seq
;
2639 l2cap_seq_list_clear(&chan
->srej_list
);
2640 skb_queue_purge(&chan
->srej_q
);
2641 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2644 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2645 struct l2cap_ctrl
*control
,
2646 struct sk_buff_head
*skbs
, u8 event
)
2648 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2652 case L2CAP_EV_DATA_REQUEST
:
2653 if (chan
->tx_send_head
== NULL
)
2654 chan
->tx_send_head
= skb_peek(skbs
);
2656 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2657 l2cap_ertm_send(chan
);
2659 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2660 BT_DBG("Enter LOCAL_BUSY");
2661 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2663 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2664 /* The SREJ_SENT state must be aborted if we are to
2665 * enter the LOCAL_BUSY state.
2667 l2cap_abort_rx_srej_sent(chan
);
2670 l2cap_send_ack(chan
);
2673 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2674 BT_DBG("Exit LOCAL_BUSY");
2675 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2677 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2678 struct l2cap_ctrl local_control
;
2680 memset(&local_control
, 0, sizeof(local_control
));
2681 local_control
.sframe
= 1;
2682 local_control
.super
= L2CAP_SUPER_RR
;
2683 local_control
.poll
= 1;
2684 local_control
.reqseq
= chan
->buffer_seq
;
2685 l2cap_send_sframe(chan
, &local_control
);
2687 chan
->retry_count
= 1;
2688 __set_monitor_timer(chan
);
2689 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2692 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2693 l2cap_process_reqseq(chan
, control
->reqseq
);
2695 case L2CAP_EV_EXPLICIT_POLL
:
2696 l2cap_send_rr_or_rnr(chan
, 1);
2697 chan
->retry_count
= 1;
2698 __set_monitor_timer(chan
);
2699 __clear_ack_timer(chan
);
2700 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2702 case L2CAP_EV_RETRANS_TO
:
2703 l2cap_send_rr_or_rnr(chan
, 1);
2704 chan
->retry_count
= 1;
2705 __set_monitor_timer(chan
);
2706 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2708 case L2CAP_EV_RECV_FBIT
:
2709 /* Nothing to process */
2716 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2717 struct l2cap_ctrl
*control
,
2718 struct sk_buff_head
*skbs
, u8 event
)
2720 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2724 case L2CAP_EV_DATA_REQUEST
:
2725 if (chan
->tx_send_head
== NULL
)
2726 chan
->tx_send_head
= skb_peek(skbs
);
2727 /* Queue data, but don't send. */
2728 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2730 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2731 BT_DBG("Enter LOCAL_BUSY");
2732 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2734 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2735 /* The SREJ_SENT state must be aborted if we are to
2736 * enter the LOCAL_BUSY state.
2738 l2cap_abort_rx_srej_sent(chan
);
2741 l2cap_send_ack(chan
);
2744 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2745 BT_DBG("Exit LOCAL_BUSY");
2746 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2748 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2749 struct l2cap_ctrl local_control
;
2750 memset(&local_control
, 0, sizeof(local_control
));
2751 local_control
.sframe
= 1;
2752 local_control
.super
= L2CAP_SUPER_RR
;
2753 local_control
.poll
= 1;
2754 local_control
.reqseq
= chan
->buffer_seq
;
2755 l2cap_send_sframe(chan
, &local_control
);
2757 chan
->retry_count
= 1;
2758 __set_monitor_timer(chan
);
2759 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2762 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2763 l2cap_process_reqseq(chan
, control
->reqseq
);
2767 case L2CAP_EV_RECV_FBIT
:
2768 if (control
&& control
->final
) {
2769 __clear_monitor_timer(chan
);
2770 if (chan
->unacked_frames
> 0)
2771 __set_retrans_timer(chan
);
2772 chan
->retry_count
= 0;
2773 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2774 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2777 case L2CAP_EV_EXPLICIT_POLL
:
2780 case L2CAP_EV_MONITOR_TO
:
2781 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2782 l2cap_send_rr_or_rnr(chan
, 1);
2783 __set_monitor_timer(chan
);
2784 chan
->retry_count
++;
2786 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2794 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2795 struct sk_buff_head
*skbs
, u8 event
)
2797 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2798 chan
, control
, skbs
, event
, chan
->tx_state
);
2800 switch (chan
->tx_state
) {
2801 case L2CAP_TX_STATE_XMIT
:
2802 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2804 case L2CAP_TX_STATE_WAIT_F
:
2805 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2813 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2814 struct l2cap_ctrl
*control
)
2816 BT_DBG("chan %p, control %p", chan
, control
);
2817 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2820 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2821 struct l2cap_ctrl
*control
)
2823 BT_DBG("chan %p, control %p", chan
, control
);
2824 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2827 /* Copy frame to all raw sockets on that connection */
2828 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2830 struct sk_buff
*nskb
;
2831 struct l2cap_chan
*chan
;
2833 BT_DBG("conn %p", conn
);
2835 mutex_lock(&conn
->chan_lock
);
2837 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2838 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2841 /* Don't send frame to the channel it came from */
2842 if (bt_cb(skb
)->chan
== chan
)
2845 nskb
= skb_clone(skb
, GFP_KERNEL
);
2848 if (chan
->ops
->recv(chan
, nskb
))
2852 mutex_unlock(&conn
->chan_lock
);
2855 /* ---- L2CAP signalling commands ---- */
2856 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2857 u8 ident
, u16 dlen
, void *data
)
2859 struct sk_buff
*skb
, **frag
;
2860 struct l2cap_cmd_hdr
*cmd
;
2861 struct l2cap_hdr
*lh
;
2864 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2865 conn
, code
, ident
, dlen
);
2867 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2870 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2871 count
= min_t(unsigned int, conn
->mtu
, len
);
2873 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2877 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2878 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2880 if (conn
->hcon
->type
== LE_LINK
)
2881 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2883 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2885 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2888 cmd
->len
= cpu_to_le16(dlen
);
2891 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2892 memcpy(skb_put(skb
, count
), data
, count
);
2898 /* Continuation fragments (no L2CAP header) */
2899 frag
= &skb_shinfo(skb
)->frag_list
;
2901 count
= min_t(unsigned int, conn
->mtu
, len
);
2903 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2907 memcpy(skb_put(*frag
, count
), data
, count
);
2912 frag
= &(*frag
)->next
;
2922 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2925 struct l2cap_conf_opt
*opt
= *ptr
;
2928 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2936 *val
= *((u8
*) opt
->val
);
2940 *val
= get_unaligned_le16(opt
->val
);
2944 *val
= get_unaligned_le32(opt
->val
);
2948 *val
= (unsigned long) opt
->val
;
2952 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2956 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2958 struct l2cap_conf_opt
*opt
= *ptr
;
2960 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2967 *((u8
*) opt
->val
) = val
;
2971 put_unaligned_le16(val
, opt
->val
);
2975 put_unaligned_le32(val
, opt
->val
);
2979 memcpy(opt
->val
, (void *) val
, len
);
2983 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2986 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2988 struct l2cap_conf_efs efs
;
2990 switch (chan
->mode
) {
2991 case L2CAP_MODE_ERTM
:
2992 efs
.id
= chan
->local_id
;
2993 efs
.stype
= chan
->local_stype
;
2994 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2995 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2996 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2997 efs
.flush_to
= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3000 case L2CAP_MODE_STREAMING
:
3002 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3003 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3004 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3013 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3014 (unsigned long) &efs
);
3017 static void l2cap_ack_timeout(struct work_struct
*work
)
3019 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3023 BT_DBG("chan %p", chan
);
3025 l2cap_chan_lock(chan
);
3027 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3028 chan
->last_acked_seq
);
3031 l2cap_send_rr_or_rnr(chan
, 0);
3033 l2cap_chan_unlock(chan
);
3034 l2cap_chan_put(chan
);
3037 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3041 chan
->next_tx_seq
= 0;
3042 chan
->expected_tx_seq
= 0;
3043 chan
->expected_ack_seq
= 0;
3044 chan
->unacked_frames
= 0;
3045 chan
->buffer_seq
= 0;
3046 chan
->frames_sent
= 0;
3047 chan
->last_acked_seq
= 0;
3049 chan
->sdu_last_frag
= NULL
;
3052 skb_queue_head_init(&chan
->tx_q
);
3054 chan
->local_amp_id
= AMP_ID_BREDR
;
3055 chan
->move_id
= AMP_ID_BREDR
;
3056 chan
->move_state
= L2CAP_MOVE_STABLE
;
3057 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3059 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3062 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3063 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3065 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3066 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3067 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3069 skb_queue_head_init(&chan
->srej_q
);
3071 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3075 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3077 l2cap_seq_list_free(&chan
->srej_list
);
3082 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3085 case L2CAP_MODE_STREAMING
:
3086 case L2CAP_MODE_ERTM
:
3087 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3091 return L2CAP_MODE_BASIC
;
3095 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3097 return ((conn
->local_fixed_chan
& L2CAP_FC_A2MP
) &&
3098 (conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
));
3101 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3103 return ((conn
->local_fixed_chan
& L2CAP_FC_A2MP
) &&
3104 (conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
));
3107 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3108 struct l2cap_conf_rfc
*rfc
)
3110 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3111 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3113 /* Class 1 devices have must have ERTM timeouts
3114 * exceeding the Link Supervision Timeout. The
3115 * default Link Supervision Timeout for AMP
3116 * controllers is 10 seconds.
3118 * Class 1 devices use 0xffffffff for their
3119 * best-effort flush timeout, so the clamping logic
3120 * will result in a timeout that meets the above
3121 * requirement. ERTM timeouts are 16-bit values, so
3122 * the maximum timeout is 65.535 seconds.
3125 /* Convert timeout to milliseconds and round */
3126 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3128 /* This is the recommended formula for class 2 devices
3129 * that start ERTM timers when packets are sent to the
3132 ertm_to
= 3 * ertm_to
+ 500;
3134 if (ertm_to
> 0xffff)
3137 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3138 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3140 rfc
->retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3141 rfc
->monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3145 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3147 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3148 __l2cap_ews_supported(chan
->conn
)) {
3149 /* use extended control field */
3150 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3151 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3153 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3154 L2CAP_DEFAULT_TX_WINDOW
);
3155 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3157 chan
->ack_win
= chan
->tx_win
;
3160 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3162 struct l2cap_conf_req
*req
= data
;
3163 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3164 void *ptr
= req
->data
;
3167 BT_DBG("chan %p", chan
);
3169 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3172 switch (chan
->mode
) {
3173 case L2CAP_MODE_STREAMING
:
3174 case L2CAP_MODE_ERTM
:
3175 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3178 if (__l2cap_efs_supported(chan
->conn
))
3179 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3183 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3188 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3189 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3191 switch (chan
->mode
) {
3192 case L2CAP_MODE_BASIC
:
3196 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3197 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3200 rfc
.mode
= L2CAP_MODE_BASIC
;
3202 rfc
.max_transmit
= 0;
3203 rfc
.retrans_timeout
= 0;
3204 rfc
.monitor_timeout
= 0;
3205 rfc
.max_pdu_size
= 0;
3207 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3208 (unsigned long) &rfc
);
3211 case L2CAP_MODE_ERTM
:
3212 rfc
.mode
= L2CAP_MODE_ERTM
;
3213 rfc
.max_transmit
= chan
->max_tx
;
3215 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3217 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3218 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3220 rfc
.max_pdu_size
= cpu_to_le16(size
);
3222 l2cap_txwin_setup(chan
);
3224 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3225 L2CAP_DEFAULT_TX_WINDOW
);
3227 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3228 (unsigned long) &rfc
);
3230 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3231 l2cap_add_opt_efs(&ptr
, chan
);
3233 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3234 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3237 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3238 if (chan
->fcs
== L2CAP_FCS_NONE
||
3239 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3240 chan
->fcs
= L2CAP_FCS_NONE
;
3241 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3246 case L2CAP_MODE_STREAMING
:
3247 l2cap_txwin_setup(chan
);
3248 rfc
.mode
= L2CAP_MODE_STREAMING
;
3250 rfc
.max_transmit
= 0;
3251 rfc
.retrans_timeout
= 0;
3252 rfc
.monitor_timeout
= 0;
3254 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3255 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3257 rfc
.max_pdu_size
= cpu_to_le16(size
);
3259 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3260 (unsigned long) &rfc
);
3262 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3263 l2cap_add_opt_efs(&ptr
, chan
);
3265 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3266 if (chan
->fcs
== L2CAP_FCS_NONE
||
3267 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3268 chan
->fcs
= L2CAP_FCS_NONE
;
3269 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3275 req
->dcid
= cpu_to_le16(chan
->dcid
);
3276 req
->flags
= cpu_to_le16(0);
/* Parse the accumulated Configuration Request from the remote
 * (chan->conf_req/conf_len) and build our Configuration Response in
 * @data.  Returns the response length, or -ECONNREFUSED when the
 * requested settings are fundamentally incompatible.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the remote sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: report it back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND,
					&chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3495 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3496 void *data
, u16
*result
)
3498 struct l2cap_conf_req
*req
= data
;
3499 void *ptr
= req
->data
;
3502 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3503 struct l2cap_conf_efs efs
;
3505 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3507 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3508 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3511 case L2CAP_CONF_MTU
:
3512 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3513 *result
= L2CAP_CONF_UNACCEPT
;
3514 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3517 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3520 case L2CAP_CONF_FLUSH_TO
:
3521 chan
->flush_to
= val
;
3522 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3526 case L2CAP_CONF_RFC
:
3527 if (olen
== sizeof(rfc
))
3528 memcpy(&rfc
, (void *)val
, olen
);
3530 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3531 rfc
.mode
!= chan
->mode
)
3532 return -ECONNREFUSED
;
3536 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3537 sizeof(rfc
), (unsigned long) &rfc
);
3540 case L2CAP_CONF_EWS
:
3541 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3542 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3546 case L2CAP_CONF_EFS
:
3547 if (olen
== sizeof(efs
))
3548 memcpy(&efs
, (void *)val
, olen
);
3550 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3551 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3552 efs
.stype
!= chan
->local_stype
)
3553 return -ECONNREFUSED
;
3555 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3556 (unsigned long) &efs
);
3559 case L2CAP_CONF_FCS
:
3560 if (*result
== L2CAP_CONF_PENDING
)
3561 if (val
== L2CAP_FCS_NONE
)
3562 set_bit(CONF_RECV_NO_FCS
,
3568 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3569 return -ECONNREFUSED
;
3571 chan
->mode
= rfc
.mode
;
3573 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3575 case L2CAP_MODE_ERTM
:
3576 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3577 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3578 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3579 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3580 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3583 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3584 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3585 chan
->local_sdu_itime
=
3586 le32_to_cpu(efs
.sdu_itime
);
3587 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3588 chan
->local_flush_to
=
3589 le32_to_cpu(efs
.flush_to
);
3593 case L2CAP_MODE_STREAMING
:
3594 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3598 req
->dcid
= cpu_to_le16(chan
->dcid
);
3599 req
->flags
= cpu_to_le16(0);
3604 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3605 u16 result
, u16 flags
)
3607 struct l2cap_conf_rsp
*rsp
= data
;
3608 void *ptr
= rsp
->data
;
3610 BT_DBG("chan %p", chan
);
3612 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3613 rsp
->result
= cpu_to_le16(result
);
3614 rsp
->flags
= cpu_to_le16(flags
);
3619 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3621 struct l2cap_le_conn_rsp rsp
;
3622 struct l2cap_conn
*conn
= chan
->conn
;
3624 BT_DBG("chan %p", chan
);
3626 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3627 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3628 rsp
.mps
= cpu_to_le16(chan
->mps
);
3629 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3630 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3632 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3636 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3638 struct l2cap_conn_rsp rsp
;
3639 struct l2cap_conn
*conn
= chan
->conn
;
3643 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3644 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3645 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3646 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3649 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3651 rsp_code
= L2CAP_CONN_RSP
;
3653 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3655 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3657 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3660 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3661 l2cap_build_conf_req(chan
, buf
), buf
);
3662 chan
->num_conf_req
++;
3665 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3669 /* Use sane default values in case a misbehaving remote device
3670 * did not send an RFC or extended window size option.
3672 u16 txwin_ext
= chan
->ack_win
;
3673 struct l2cap_conf_rfc rfc
= {
3675 .retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3676 .monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3677 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3678 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3681 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3683 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3686 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3687 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3690 case L2CAP_CONF_RFC
:
3691 if (olen
== sizeof(rfc
))
3692 memcpy(&rfc
, (void *)val
, olen
);
3694 case L2CAP_CONF_EWS
:
3701 case L2CAP_MODE_ERTM
:
3702 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3703 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3704 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3705 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3706 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3708 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3711 case L2CAP_MODE_STREAMING
:
3712 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3716 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3717 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3720 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3722 if (cmd_len
< sizeof(*rej
))
3725 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3728 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3729 cmd
->ident
== conn
->info_ident
) {
3730 cancel_delayed_work(&conn
->info_timer
);
3732 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3733 conn
->info_ident
= 0;
3735 l2cap_conn_start(conn
);
3741 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3742 struct l2cap_cmd_hdr
*cmd
,
3743 u8
*data
, u8 rsp_code
, u8 amp_id
)
3745 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3746 struct l2cap_conn_rsp rsp
;
3747 struct l2cap_chan
*chan
= NULL
, *pchan
;
3748 int result
, status
= L2CAP_CS_NO_INFO
;
3750 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3751 __le16 psm
= req
->psm
;
3753 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3755 /* Check if we have socket listening on psm */
3756 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3757 &conn
->hcon
->dst
, ACL_LINK
);
3759 result
= L2CAP_CR_BAD_PSM
;
3763 mutex_lock(&conn
->chan_lock
);
3764 l2cap_chan_lock(pchan
);
3766 /* Check if the ACL is secure enough (if not SDP) */
3767 if (psm
!= cpu_to_le16(L2CAP_PSM_SDP
) &&
3768 !hci_conn_check_link_mode(conn
->hcon
)) {
3769 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3770 result
= L2CAP_CR_SEC_BLOCK
;
3774 result
= L2CAP_CR_NO_MEM
;
3776 /* Check if we already have channel with that dcid */
3777 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3780 chan
= pchan
->ops
->new_connection(pchan
);
3784 /* For certain devices (ex: HID mouse), support for authentication,
3785 * pairing and bonding is optional. For such devices, inorder to avoid
3786 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3787 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3789 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3791 bacpy(&chan
->src
, &conn
->hcon
->src
);
3792 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3793 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3794 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3797 chan
->local_amp_id
= amp_id
;
3799 __l2cap_chan_add(conn
, chan
);
3803 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3805 chan
->ident
= cmd
->ident
;
3807 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3808 if (l2cap_chan_check_security(chan
, false)) {
3809 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3810 l2cap_state_change(chan
, BT_CONNECT2
);
3811 result
= L2CAP_CR_PEND
;
3812 status
= L2CAP_CS_AUTHOR_PEND
;
3813 chan
->ops
->defer(chan
);
3815 /* Force pending result for AMP controllers.
3816 * The connection will succeed after the
3817 * physical link is up.
3819 if (amp_id
== AMP_ID_BREDR
) {
3820 l2cap_state_change(chan
, BT_CONFIG
);
3821 result
= L2CAP_CR_SUCCESS
;
3823 l2cap_state_change(chan
, BT_CONNECT2
);
3824 result
= L2CAP_CR_PEND
;
3826 status
= L2CAP_CS_NO_INFO
;
3829 l2cap_state_change(chan
, BT_CONNECT2
);
3830 result
= L2CAP_CR_PEND
;
3831 status
= L2CAP_CS_AUTHEN_PEND
;
3834 l2cap_state_change(chan
, BT_CONNECT2
);
3835 result
= L2CAP_CR_PEND
;
3836 status
= L2CAP_CS_NO_INFO
;
3840 l2cap_chan_unlock(pchan
);
3841 mutex_unlock(&conn
->chan_lock
);
3842 l2cap_chan_put(pchan
);
3845 rsp
.scid
= cpu_to_le16(scid
);
3846 rsp
.dcid
= cpu_to_le16(dcid
);
3847 rsp
.result
= cpu_to_le16(result
);
3848 rsp
.status
= cpu_to_le16(status
);
3849 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3851 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3852 struct l2cap_info_req info
;
3853 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3855 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3856 conn
->info_ident
= l2cap_get_ident(conn
);
3858 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3860 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3861 sizeof(info
), &info
);
3864 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3865 result
== L2CAP_CR_SUCCESS
) {
3867 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3868 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3869 l2cap_build_conf_req(chan
, buf
), buf
);
3870 chan
->num_conf_req
++;
3876 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3877 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3879 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3880 struct hci_conn
*hcon
= conn
->hcon
;
3882 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3886 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3887 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3888 mgmt_device_connected(hdev
, hcon
, 0, NULL
, 0);
3889 hci_dev_unlock(hdev
);
3891 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3895 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3896 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3899 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3900 u16 scid
, dcid
, result
, status
;
3901 struct l2cap_chan
*chan
;
3905 if (cmd_len
< sizeof(*rsp
))
3908 scid
= __le16_to_cpu(rsp
->scid
);
3909 dcid
= __le16_to_cpu(rsp
->dcid
);
3910 result
= __le16_to_cpu(rsp
->result
);
3911 status
= __le16_to_cpu(rsp
->status
);
3913 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3914 dcid
, scid
, result
, status
);
3916 mutex_lock(&conn
->chan_lock
);
3919 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3925 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3934 l2cap_chan_lock(chan
);
3937 case L2CAP_CR_SUCCESS
:
3938 l2cap_state_change(chan
, BT_CONFIG
);
3941 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3943 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3946 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3947 l2cap_build_conf_req(chan
, req
), req
);
3948 chan
->num_conf_req
++;
3952 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3956 l2cap_chan_del(chan
, ECONNREFUSED
);
3960 l2cap_chan_unlock(chan
);
3963 mutex_unlock(&conn
->chan_lock
);
3968 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3970 /* FCS is enabled only in ERTM or streaming mode, if one or both
3973 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3974 chan
->fcs
= L2CAP_FCS_NONE
;
3975 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
3976 chan
->fcs
= L2CAP_FCS_CRC16
;
3979 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3980 u8 ident
, u16 flags
)
3982 struct l2cap_conn
*conn
= chan
->conn
;
3984 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3987 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3988 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3990 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3991 l2cap_build_conf_rsp(chan
, data
,
3992 L2CAP_CONF_SUCCESS
, flags
), data
);
3995 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
3998 struct l2cap_cmd_rej_cid rej
;
4000 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4001 rej
.scid
= __cpu_to_le16(scid
);
4002 rej
.dcid
= __cpu_to_le16(dcid
);
4004 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4007 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4008 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4011 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4014 struct l2cap_chan
*chan
;
4017 if (cmd_len
< sizeof(*req
))
4020 dcid
= __le16_to_cpu(req
->dcid
);
4021 flags
= __le16_to_cpu(req
->flags
);
4023 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4025 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4027 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4031 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4032 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4037 /* Reject if config buffer is too small. */
4038 len
= cmd_len
- sizeof(*req
);
4039 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4040 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4041 l2cap_build_conf_rsp(chan
, rsp
,
4042 L2CAP_CONF_REJECT
, flags
), rsp
);
4047 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4048 chan
->conf_len
+= len
;
4050 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4051 /* Incomplete config. Send empty response. */
4052 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4053 l2cap_build_conf_rsp(chan
, rsp
,
4054 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4058 /* Complete config. */
4059 len
= l2cap_parse_conf_req(chan
, rsp
);
4061 l2cap_send_disconn_req(chan
, ECONNRESET
);
4065 chan
->ident
= cmd
->ident
;
4066 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4067 chan
->num_conf_rsp
++;
4069 /* Reset config buffer. */
4072 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4075 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4076 set_default_fcs(chan
);
4078 if (chan
->mode
== L2CAP_MODE_ERTM
||
4079 chan
->mode
== L2CAP_MODE_STREAMING
)
4080 err
= l2cap_ertm_init(chan
);
4083 l2cap_send_disconn_req(chan
, -err
);
4085 l2cap_chan_ready(chan
);
4090 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4092 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4093 l2cap_build_conf_req(chan
, buf
), buf
);
4094 chan
->num_conf_req
++;
4097 /* Got Conf Rsp PENDING from remote side and assume we sent
4098 Conf Rsp PENDING in the code above */
4099 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4100 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4102 /* check compatibility */
4104 /* Send rsp for BR/EDR channel */
4106 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4108 chan
->ident
= cmd
->ident
;
4112 l2cap_chan_unlock(chan
);
4116 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4117 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4120 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4121 u16 scid
, flags
, result
;
4122 struct l2cap_chan
*chan
;
4123 int len
= cmd_len
- sizeof(*rsp
);
4126 if (cmd_len
< sizeof(*rsp
))
4129 scid
= __le16_to_cpu(rsp
->scid
);
4130 flags
= __le16_to_cpu(rsp
->flags
);
4131 result
= __le16_to_cpu(rsp
->result
);
4133 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4136 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4141 case L2CAP_CONF_SUCCESS
:
4142 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4143 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4146 case L2CAP_CONF_PENDING
:
4147 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4149 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4152 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4155 l2cap_send_disconn_req(chan
, ECONNRESET
);
4159 if (!chan
->hs_hcon
) {
4160 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4163 if (l2cap_check_efs(chan
)) {
4164 amp_create_logical_link(chan
);
4165 chan
->ident
= cmd
->ident
;
4171 case L2CAP_CONF_UNACCEPT
:
4172 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4175 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4176 l2cap_send_disconn_req(chan
, ECONNRESET
);
4180 /* throw out any old stored conf requests */
4181 result
= L2CAP_CONF_SUCCESS
;
4182 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4185 l2cap_send_disconn_req(chan
, ECONNRESET
);
4189 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4190 L2CAP_CONF_REQ
, len
, req
);
4191 chan
->num_conf_req
++;
4192 if (result
!= L2CAP_CONF_SUCCESS
)
4198 l2cap_chan_set_err(chan
, ECONNRESET
);
4200 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4201 l2cap_send_disconn_req(chan
, ECONNRESET
);
4205 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4208 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4210 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4211 set_default_fcs(chan
);
4213 if (chan
->mode
== L2CAP_MODE_ERTM
||
4214 chan
->mode
== L2CAP_MODE_STREAMING
)
4215 err
= l2cap_ertm_init(chan
);
4218 l2cap_send_disconn_req(chan
, -err
);
4220 l2cap_chan_ready(chan
);
4224 l2cap_chan_unlock(chan
);
4228 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4229 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4232 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4233 struct l2cap_disconn_rsp rsp
;
4235 struct l2cap_chan
*chan
;
4237 if (cmd_len
!= sizeof(*req
))
4240 scid
= __le16_to_cpu(req
->scid
);
4241 dcid
= __le16_to_cpu(req
->dcid
);
4243 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4245 mutex_lock(&conn
->chan_lock
);
4247 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4249 mutex_unlock(&conn
->chan_lock
);
4250 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4254 l2cap_chan_lock(chan
);
4256 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4257 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4258 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4260 chan
->ops
->set_shutdown(chan
);
4262 l2cap_chan_hold(chan
);
4263 l2cap_chan_del(chan
, ECONNRESET
);
4265 l2cap_chan_unlock(chan
);
4267 chan
->ops
->close(chan
);
4268 l2cap_chan_put(chan
);
4270 mutex_unlock(&conn
->chan_lock
);
4275 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4276 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4279 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4281 struct l2cap_chan
*chan
;
4283 if (cmd_len
!= sizeof(*rsp
))
4286 scid
= __le16_to_cpu(rsp
->scid
);
4287 dcid
= __le16_to_cpu(rsp
->dcid
);
4289 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4291 mutex_lock(&conn
->chan_lock
);
4293 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4295 mutex_unlock(&conn
->chan_lock
);
4299 l2cap_chan_lock(chan
);
4301 l2cap_chan_hold(chan
);
4302 l2cap_chan_del(chan
, 0);
4304 l2cap_chan_unlock(chan
);
4306 chan
->ops
->close(chan
);
4307 l2cap_chan_put(chan
);
4309 mutex_unlock(&conn
->chan_lock
);
4314 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4315 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4318 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4321 if (cmd_len
!= sizeof(*req
))
4324 type
= __le16_to_cpu(req
->type
);
4326 BT_DBG("type 0x%4.4x", type
);
4328 if (type
== L2CAP_IT_FEAT_MASK
) {
4330 u32 feat_mask
= l2cap_feat_mask
;
4331 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4332 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4333 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4335 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4337 if (conn
->local_fixed_chan
& L2CAP_FC_A2MP
)
4338 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4339 | L2CAP_FEAT_EXT_WINDOW
;
4341 put_unaligned_le32(feat_mask
, rsp
->data
);
4342 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4344 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4346 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4348 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4349 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4350 rsp
->data
[0] = conn
->local_fixed_chan
;
4351 memset(rsp
->data
+ 1, 0, 7);
4352 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4355 struct l2cap_info_rsp rsp
;
4356 rsp
.type
= cpu_to_le16(type
);
4357 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4358 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4365 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4366 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4369 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4372 if (cmd_len
< sizeof(*rsp
))
4375 type
= __le16_to_cpu(rsp
->type
);
4376 result
= __le16_to_cpu(rsp
->result
);
4378 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4380 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4381 if (cmd
->ident
!= conn
->info_ident
||
4382 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4385 cancel_delayed_work(&conn
->info_timer
);
4387 if (result
!= L2CAP_IR_SUCCESS
) {
4388 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4389 conn
->info_ident
= 0;
4391 l2cap_conn_start(conn
);
4397 case L2CAP_IT_FEAT_MASK
:
4398 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4400 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4401 struct l2cap_info_req req
;
4402 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4404 conn
->info_ident
= l2cap_get_ident(conn
);
4406 l2cap_send_cmd(conn
, conn
->info_ident
,
4407 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4409 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4410 conn
->info_ident
= 0;
4412 l2cap_conn_start(conn
);
4416 case L2CAP_IT_FIXED_CHAN
:
4417 conn
->remote_fixed_chan
= rsp
->data
[0];
4418 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4419 conn
->info_ident
= 0;
4421 l2cap_conn_start(conn
);
4428 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4429 struct l2cap_cmd_hdr
*cmd
,
4430 u16 cmd_len
, void *data
)
4432 struct l2cap_create_chan_req
*req
= data
;
4433 struct l2cap_create_chan_rsp rsp
;
4434 struct l2cap_chan
*chan
;
4435 struct hci_dev
*hdev
;
4438 if (cmd_len
!= sizeof(*req
))
4441 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
4444 psm
= le16_to_cpu(req
->psm
);
4445 scid
= le16_to_cpu(req
->scid
);
4447 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4449 /* For controller id 0 make BR/EDR connection */
4450 if (req
->amp_id
== AMP_ID_BREDR
) {
4451 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4456 /* Validate AMP controller id */
4457 hdev
= hci_dev_get(req
->amp_id
);
4461 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4466 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4469 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4470 struct hci_conn
*hs_hcon
;
4472 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4476 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4481 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4483 mgr
->bredr_chan
= chan
;
4484 chan
->hs_hcon
= hs_hcon
;
4485 chan
->fcs
= L2CAP_FCS_NONE
;
4486 conn
->mtu
= hdev
->block_mtu
;
4495 rsp
.scid
= cpu_to_le16(scid
);
4496 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4497 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4499 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4505 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4507 struct l2cap_move_chan_req req
;
4510 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4512 ident
= l2cap_get_ident(chan
->conn
);
4513 chan
->ident
= ident
;
4515 req
.icid
= cpu_to_le16(chan
->scid
);
4516 req
.dest_amp_id
= dest_amp_id
;
4518 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4521 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4524 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4526 struct l2cap_move_chan_rsp rsp
;
4528 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4530 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4531 rsp
.result
= cpu_to_le16(result
);
4533 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4537 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4539 struct l2cap_move_chan_cfm cfm
;
4541 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4543 chan
->ident
= l2cap_get_ident(chan
->conn
);
4545 cfm
.icid
= cpu_to_le16(chan
->scid
);
4546 cfm
.result
= cpu_to_le16(result
);
4548 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4551 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4554 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4556 struct l2cap_move_chan_cfm cfm
;
4558 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4560 cfm
.icid
= cpu_to_le16(icid
);
4561 cfm
.result
= cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4563 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4567 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4570 struct l2cap_move_chan_cfm_rsp rsp
;
4572 BT_DBG("icid 0x%4.4x", icid
);
4574 rsp
.icid
= cpu_to_le16(icid
);
4575 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4578 static void __release_logical_link(struct l2cap_chan
*chan
)
4580 chan
->hs_hchan
= NULL
;
4581 chan
->hs_hcon
= NULL
;
4583 /* Placeholder - release the logical link */
4586 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4588 /* Logical link setup failed */
4589 if (chan
->state
!= BT_CONNECTED
) {
4590 /* Create channel failure, disconnect */
4591 l2cap_send_disconn_req(chan
, ECONNRESET
);
4595 switch (chan
->move_role
) {
4596 case L2CAP_MOVE_ROLE_RESPONDER
:
4597 l2cap_move_done(chan
);
4598 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4600 case L2CAP_MOVE_ROLE_INITIATOR
:
4601 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4602 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4603 /* Remote has only sent pending or
4604 * success responses, clean up
4606 l2cap_move_done(chan
);
4609 /* Other amp move states imply that the move
4610 * has already aborted
4612 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4617 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4618 struct hci_chan
*hchan
)
4620 struct l2cap_conf_rsp rsp
;
4622 chan
->hs_hchan
= hchan
;
4623 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4625 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4627 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4630 set_default_fcs(chan
);
4632 err
= l2cap_ertm_init(chan
);
4634 l2cap_send_disconn_req(chan
, -err
);
4636 l2cap_chan_ready(chan
);
4640 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4641 struct hci_chan
*hchan
)
4643 chan
->hs_hcon
= hchan
->conn
;
4644 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4646 BT_DBG("move_state %d", chan
->move_state
);
4648 switch (chan
->move_state
) {
4649 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4650 /* Move confirm will be sent after a success
4651 * response is received
4653 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4655 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4656 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4657 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4658 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4659 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4660 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4661 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4662 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4663 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4667 /* Move was not in expected state, free the channel */
4668 __release_logical_link(chan
);
4670 chan
->move_state
= L2CAP_MOVE_STABLE
;
4674 /* Call with chan locked */
4675 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4678 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4681 l2cap_logical_fail(chan
);
4682 __release_logical_link(chan
);
4686 if (chan
->state
!= BT_CONNECTED
) {
4687 /* Ignore logical link if channel is on BR/EDR */
4688 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4689 l2cap_logical_finish_create(chan
, hchan
);
4691 l2cap_logical_finish_move(chan
, hchan
);
4695 void l2cap_move_start(struct l2cap_chan
*chan
)
4697 BT_DBG("chan %p", chan
);
4699 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4700 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4702 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4703 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4704 /* Placeholder - start physical link setup */
4706 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4707 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4709 l2cap_move_setup(chan
);
4710 l2cap_send_move_chan_req(chan
, 0);
4714 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4715 u8 local_amp_id
, u8 remote_amp_id
)
4717 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4718 local_amp_id
, remote_amp_id
);
4720 chan
->fcs
= L2CAP_FCS_NONE
;
4722 /* Outgoing channel on AMP */
4723 if (chan
->state
== BT_CONNECT
) {
4724 if (result
== L2CAP_CR_SUCCESS
) {
4725 chan
->local_amp_id
= local_amp_id
;
4726 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4728 /* Revert to BR/EDR connect */
4729 l2cap_send_conn_req(chan
);
4735 /* Incoming channel on AMP */
4736 if (__l2cap_no_conn_pending(chan
)) {
4737 struct l2cap_conn_rsp rsp
;
4739 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4740 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4742 if (result
== L2CAP_CR_SUCCESS
) {
4743 /* Send successful response */
4744 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4745 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4747 /* Send negative response */
4748 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4749 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4752 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4755 if (result
== L2CAP_CR_SUCCESS
) {
4756 l2cap_state_change(chan
, BT_CONFIG
);
4757 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4758 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4760 l2cap_build_conf_req(chan
, buf
), buf
);
4761 chan
->num_conf_req
++;
4766 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4769 l2cap_move_setup(chan
);
4770 chan
->move_id
= local_amp_id
;
4771 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4773 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4776 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4778 struct hci_chan
*hchan
= NULL
;
4780 /* Placeholder - get hci_chan for logical link */
4783 if (hchan
->state
== BT_CONNECTED
) {
4784 /* Logical link is ready to go */
4785 chan
->hs_hcon
= hchan
->conn
;
4786 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4787 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4788 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4790 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4792 /* Wait for logical link to be ready */
4793 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4796 /* Logical link not available */
4797 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4801 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4803 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4805 if (result
== -EINVAL
)
4806 rsp_result
= L2CAP_MR_BAD_ID
;
4808 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4810 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4813 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4814 chan
->move_state
= L2CAP_MOVE_STABLE
;
4816 /* Restart data transmission */
4817 l2cap_ertm_send(chan
);
4820 /* Invoke with locked chan */
4821 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4823 u8 local_amp_id
= chan
->local_amp_id
;
4824 u8 remote_amp_id
= chan
->remote_amp_id
;
4826 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4827 chan
, result
, local_amp_id
, remote_amp_id
);
4829 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4830 l2cap_chan_unlock(chan
);
4834 if (chan
->state
!= BT_CONNECTED
) {
4835 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4836 } else if (result
!= L2CAP_MR_SUCCESS
) {
4837 l2cap_do_move_cancel(chan
, result
);
4839 switch (chan
->move_role
) {
4840 case L2CAP_MOVE_ROLE_INITIATOR
:
4841 l2cap_do_move_initiate(chan
, local_amp_id
,
4844 case L2CAP_MOVE_ROLE_RESPONDER
:
4845 l2cap_do_move_respond(chan
, result
);
4848 l2cap_do_move_cancel(chan
, result
);
4854 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4855 struct l2cap_cmd_hdr
*cmd
,
4856 u16 cmd_len
, void *data
)
4858 struct l2cap_move_chan_req
*req
= data
;
4859 struct l2cap_move_chan_rsp rsp
;
4860 struct l2cap_chan
*chan
;
4862 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4864 if (cmd_len
!= sizeof(*req
))
4867 icid
= le16_to_cpu(req
->icid
);
4869 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4871 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
4874 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4876 rsp
.icid
= cpu_to_le16(icid
);
4877 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4878 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4883 chan
->ident
= cmd
->ident
;
4885 if (chan
->scid
< L2CAP_CID_DYN_START
||
4886 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4887 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4888 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4889 result
= L2CAP_MR_NOT_ALLOWED
;
4890 goto send_move_response
;
4893 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4894 result
= L2CAP_MR_SAME_ID
;
4895 goto send_move_response
;
4898 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4899 struct hci_dev
*hdev
;
4900 hdev
= hci_dev_get(req
->dest_amp_id
);
4901 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4902 !test_bit(HCI_UP
, &hdev
->flags
)) {
4906 result
= L2CAP_MR_BAD_ID
;
4907 goto send_move_response
;
4912 /* Detect a move collision. Only send a collision response
4913 * if this side has "lost", otherwise proceed with the move.
4914 * The winner has the larger bd_addr.
4916 if ((__chan_is_moving(chan
) ||
4917 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4918 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4919 result
= L2CAP_MR_COLLISION
;
4920 goto send_move_response
;
4923 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4924 l2cap_move_setup(chan
);
4925 chan
->move_id
= req
->dest_amp_id
;
4928 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4929 /* Moving to BR/EDR */
4930 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4931 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4932 result
= L2CAP_MR_PEND
;
4934 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4935 result
= L2CAP_MR_SUCCESS
;
4938 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4939 /* Placeholder - uncomment when amp functions are available */
4940 /*amp_accept_physical(chan, req->dest_amp_id);*/
4941 result
= L2CAP_MR_PEND
;
4945 l2cap_send_move_chan_rsp(chan
, result
);
4947 l2cap_chan_unlock(chan
);
4952 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4954 struct l2cap_chan
*chan
;
4955 struct hci_chan
*hchan
= NULL
;
4957 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4959 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4963 __clear_chan_timer(chan
);
4964 if (result
== L2CAP_MR_PEND
)
4965 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4967 switch (chan
->move_state
) {
4968 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4969 /* Move confirm will be sent when logical link
4972 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4974 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4975 if (result
== L2CAP_MR_PEND
) {
4977 } else if (test_bit(CONN_LOCAL_BUSY
,
4978 &chan
->conn_state
)) {
4979 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4981 /* Logical link is up or moving to BR/EDR,
4984 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4985 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4988 case L2CAP_MOVE_WAIT_RSP
:
4990 if (result
== L2CAP_MR_SUCCESS
) {
4991 /* Remote is ready, send confirm immediately
4992 * after logical link is ready
4994 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4996 /* Both logical link and move success
4997 * are required to confirm
4999 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5002 /* Placeholder - get hci_chan for logical link */
5004 /* Logical link not available */
5005 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5009 /* If the logical link is not yet connected, do not
5010 * send confirmation.
5012 if (hchan
->state
!= BT_CONNECTED
)
5015 /* Logical link is already ready to go */
5017 chan
->hs_hcon
= hchan
->conn
;
5018 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5020 if (result
== L2CAP_MR_SUCCESS
) {
5021 /* Can confirm now */
5022 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5024 /* Now only need move success
5027 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5030 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5033 /* Any other amp move state means the move failed. */
5034 chan
->move_id
= chan
->local_amp_id
;
5035 l2cap_move_done(chan
);
5036 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5039 l2cap_chan_unlock(chan
);
5042 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5045 struct l2cap_chan
*chan
;
5047 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5049 /* Could not locate channel, icid is best guess */
5050 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5054 __clear_chan_timer(chan
);
5056 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5057 if (result
== L2CAP_MR_COLLISION
) {
5058 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5060 /* Cleanup - cancel move */
5061 chan
->move_id
= chan
->local_amp_id
;
5062 l2cap_move_done(chan
);
5066 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5068 l2cap_chan_unlock(chan
);
5071 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5072 struct l2cap_cmd_hdr
*cmd
,
5073 u16 cmd_len
, void *data
)
5075 struct l2cap_move_chan_rsp
*rsp
= data
;
5078 if (cmd_len
!= sizeof(*rsp
))
5081 icid
= le16_to_cpu(rsp
->icid
);
5082 result
= le16_to_cpu(rsp
->result
);
5084 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5086 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5087 l2cap_move_continue(conn
, icid
, result
);
5089 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5094 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5095 struct l2cap_cmd_hdr
*cmd
,
5096 u16 cmd_len
, void *data
)
5098 struct l2cap_move_chan_cfm
*cfm
= data
;
5099 struct l2cap_chan
*chan
;
5102 if (cmd_len
!= sizeof(*cfm
))
5105 icid
= le16_to_cpu(cfm
->icid
);
5106 result
= le16_to_cpu(cfm
->result
);
5108 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5110 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5112 /* Spec requires a response even if the icid was not found */
5113 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5117 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5118 if (result
== L2CAP_MC_CONFIRMED
) {
5119 chan
->local_amp_id
= chan
->move_id
;
5120 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5121 __release_logical_link(chan
);
5123 chan
->move_id
= chan
->local_amp_id
;
5126 l2cap_move_done(chan
);
5129 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5131 l2cap_chan_unlock(chan
);
5136 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5137 struct l2cap_cmd_hdr
*cmd
,
5138 u16 cmd_len
, void *data
)
5140 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5141 struct l2cap_chan
*chan
;
5144 if (cmd_len
!= sizeof(*rsp
))
5147 icid
= le16_to_cpu(rsp
->icid
);
5149 BT_DBG("icid 0x%4.4x", icid
);
5151 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5155 __clear_chan_timer(chan
);
5157 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5158 chan
->local_amp_id
= chan
->move_id
;
5160 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5161 __release_logical_link(chan
);
5163 l2cap_move_done(chan
);
5166 l2cap_chan_unlock(chan
);
5171 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5172 struct l2cap_cmd_hdr
*cmd
,
5173 u16 cmd_len
, u8
*data
)
5175 struct hci_conn
*hcon
= conn
->hcon
;
5176 struct l2cap_conn_param_update_req
*req
;
5177 struct l2cap_conn_param_update_rsp rsp
;
5178 u16 min
, max
, latency
, to_multiplier
;
5181 if (hcon
->role
!= HCI_ROLE_MASTER
)
5184 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5187 req
= (struct l2cap_conn_param_update_req
*) data
;
5188 min
= __le16_to_cpu(req
->min
);
5189 max
= __le16_to_cpu(req
->max
);
5190 latency
= __le16_to_cpu(req
->latency
);
5191 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5193 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5194 min
, max
, latency
, to_multiplier
);
5196 memset(&rsp
, 0, sizeof(rsp
));
5198 err
= hci_check_conn_params(min
, max
, latency
, to_multiplier
);
5200 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5202 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5204 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5210 store_hint
= hci_le_conn_update(hcon
, min
, max
, latency
,
5212 mgmt_new_conn_param(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
,
5213 store_hint
, min
, max
, latency
,
5221 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5222 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5225 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5226 struct hci_conn
*hcon
= conn
->hcon
;
5227 u16 dcid
, mtu
, mps
, credits
, result
;
5228 struct l2cap_chan
*chan
;
5231 if (cmd_len
< sizeof(*rsp
))
5234 dcid
= __le16_to_cpu(rsp
->dcid
);
5235 mtu
= __le16_to_cpu(rsp
->mtu
);
5236 mps
= __le16_to_cpu(rsp
->mps
);
5237 credits
= __le16_to_cpu(rsp
->credits
);
5238 result
= __le16_to_cpu(rsp
->result
);
5240 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5243 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5244 dcid
, mtu
, mps
, credits
, result
);
5246 mutex_lock(&conn
->chan_lock
);
5248 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5256 l2cap_chan_lock(chan
);
5259 case L2CAP_CR_SUCCESS
:
5263 chan
->remote_mps
= mps
;
5264 chan
->tx_credits
= credits
;
5265 l2cap_chan_ready(chan
);
5268 case L2CAP_CR_AUTHENTICATION
:
5269 case L2CAP_CR_ENCRYPTION
:
5270 /* If we already have MITM protection we can't do
5273 if (hcon
->sec_level
> BT_SECURITY_MEDIUM
) {
5274 l2cap_chan_del(chan
, ECONNREFUSED
);
5278 sec_level
= hcon
->sec_level
+ 1;
5279 if (chan
->sec_level
< sec_level
)
5280 chan
->sec_level
= sec_level
;
5282 /* We'll need to send a new Connect Request */
5283 clear_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
);
5285 smp_conn_security(hcon
, chan
->sec_level
);
5289 l2cap_chan_del(chan
, ECONNREFUSED
);
5293 l2cap_chan_unlock(chan
);
5296 mutex_unlock(&conn
->chan_lock
);
5301 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5302 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5307 switch (cmd
->code
) {
5308 case L2CAP_COMMAND_REJ
:
5309 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5312 case L2CAP_CONN_REQ
:
5313 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5316 case L2CAP_CONN_RSP
:
5317 case L2CAP_CREATE_CHAN_RSP
:
5318 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5321 case L2CAP_CONF_REQ
:
5322 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5325 case L2CAP_CONF_RSP
:
5326 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5329 case L2CAP_DISCONN_REQ
:
5330 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5333 case L2CAP_DISCONN_RSP
:
5334 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5337 case L2CAP_ECHO_REQ
:
5338 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5341 case L2CAP_ECHO_RSP
:
5344 case L2CAP_INFO_REQ
:
5345 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5348 case L2CAP_INFO_RSP
:
5349 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5352 case L2CAP_CREATE_CHAN_REQ
:
5353 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5356 case L2CAP_MOVE_CHAN_REQ
:
5357 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5360 case L2CAP_MOVE_CHAN_RSP
:
5361 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5364 case L2CAP_MOVE_CHAN_CFM
:
5365 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5368 case L2CAP_MOVE_CHAN_CFM_RSP
:
5369 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5373 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5381 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5382 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5385 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5386 struct l2cap_le_conn_rsp rsp
;
5387 struct l2cap_chan
*chan
, *pchan
;
5388 u16 dcid
, scid
, credits
, mtu
, mps
;
5392 if (cmd_len
!= sizeof(*req
))
5395 scid
= __le16_to_cpu(req
->scid
);
5396 mtu
= __le16_to_cpu(req
->mtu
);
5397 mps
= __le16_to_cpu(req
->mps
);
5402 if (mtu
< 23 || mps
< 23)
5405 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5408 /* Check if we have socket listening on psm */
5409 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5410 &conn
->hcon
->dst
, LE_LINK
);
5412 result
= L2CAP_CR_BAD_PSM
;
5417 mutex_lock(&conn
->chan_lock
);
5418 l2cap_chan_lock(pchan
);
5420 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
,
5422 result
= L2CAP_CR_AUTHENTICATION
;
5424 goto response_unlock
;
5427 /* Check if we already have channel with that dcid */
5428 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5429 result
= L2CAP_CR_NO_MEM
;
5431 goto response_unlock
;
5434 chan
= pchan
->ops
->new_connection(pchan
);
5436 result
= L2CAP_CR_NO_MEM
;
5437 goto response_unlock
;
5440 l2cap_le_flowctl_init(chan
);
5442 bacpy(&chan
->src
, &conn
->hcon
->src
);
5443 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5444 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5445 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5449 chan
->remote_mps
= mps
;
5450 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5452 __l2cap_chan_add(conn
, chan
);
5454 credits
= chan
->rx_credits
;
5456 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5458 chan
->ident
= cmd
->ident
;
5460 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5461 l2cap_state_change(chan
, BT_CONNECT2
);
5462 /* The following result value is actually not defined
5463 * for LE CoC but we use it to let the function know
5464 * that it should bail out after doing its cleanup
5465 * instead of sending a response.
5467 result
= L2CAP_CR_PEND
;
5468 chan
->ops
->defer(chan
);
5470 l2cap_chan_ready(chan
);
5471 result
= L2CAP_CR_SUCCESS
;
5475 l2cap_chan_unlock(pchan
);
5476 mutex_unlock(&conn
->chan_lock
);
5477 l2cap_chan_put(pchan
);
5479 if (result
== L2CAP_CR_PEND
)
5484 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5485 rsp
.mps
= cpu_to_le16(chan
->mps
);
5491 rsp
.dcid
= cpu_to_le16(dcid
);
5492 rsp
.credits
= cpu_to_le16(credits
);
5493 rsp
.result
= cpu_to_le16(result
);
5495 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
/* Handle an incoming LE Flow Control Credit packet (L2CAP_LE_CREDITS):
 * validate the advertised credit count, add it to the channel's tx_credits,
 * and flush any queued outbound SDU fragments that can now be sent.
 * NOTE(review): extraction has dropped interior lines (returns, braces,
 * labels) — verify against upstream l2cap_core.c before relying on the
 * literal text below.
 */
5500 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5501 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5504 struct l2cap_le_credits
*pkt
;
5505 struct l2cap_chan
*chan
;
5506 u16 cid
, credits
, max_credits
;
/* Reject malformed packets whose payload is not exactly one credits PDU. */
5508 if (cmd_len
!= sizeof(*pkt
))
5511 pkt
= (struct l2cap_le_credits
*) data
;
5512 cid
= __le16_to_cpu(pkt
->cid
);
5513 credits
= __le16_to_cpu(pkt
->credits
);
5515 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5517 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
/* Per the LE CoC spec a peer must never push tx_credits past 65535;
 * treat an overflow as a protocol violation and disconnect.
 */
5521 max_credits
= LE_FLOWCTL_MAX_CREDITS
- chan
->tx_credits
;
5522 if (credits
> max_credits
) {
5523 BT_ERR("LE credits overflow");
5524 l2cap_send_disconn_req(chan
, ECONNRESET
);
5525 l2cap_chan_unlock(chan
);
5527 /* Return 0 so that we don't trigger an unnecessary
5528 * command reject packet.
5533 chan
->tx_credits
+= credits
;
/* Drain the tx queue while credits remain; each sent frame is presumed
 * to consume one credit inside l2cap_do_send — TODO confirm.
 */
5535 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
5536 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
/* Leftover credits mean the socket layer may resume sending. */
5540 if (chan
->tx_credits
)
5541 chan
->ops
->resume(chan
);
5543 l2cap_chan_unlock(chan
);
/* Handle an LE Command Reject: find the pending channel whose request
 * (matched by cmd->ident) was rejected and tear it down with ECONNREFUSED.
 * NOTE(review): extraction has dropped interior lines — verify against
 * upstream l2cap_core.c.
 */
5548 static inline int l2cap_le_command_rej(struct l2cap_conn
*conn
,
5549 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5552 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
5553 struct l2cap_chan
*chan
;
/* Reject packets too short to contain the mandatory reason field. */
5555 if (cmd_len
< sizeof(*rej
))
5558 mutex_lock(&conn
->chan_lock
);
5560 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5564 l2cap_chan_lock(chan
);
5565 l2cap_chan_del(chan
, ECONNREFUSED
);
5566 l2cap_chan_unlock(chan
);
5569 mutex_unlock(&conn
->chan_lock
);
/* Dispatch one LE signaling command to its handler based on cmd->code.
 * Unknown codes are logged; the caller presumably sends a command reject
 * on error — TODO confirm (break/return lines dropped by extraction).
 */
5573 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5574 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5579 switch (cmd
->code
) {
5580 case L2CAP_COMMAND_REJ
:
5581 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5584 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5585 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5588 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5591 case L2CAP_LE_CONN_RSP
:
5592 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5595 case L2CAP_LE_CONN_REQ
:
5596 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5599 case L2CAP_LE_CREDITS
:
5600 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5603 case L2CAP_DISCONN_REQ
:
5604 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5607 case L2CAP_DISCONN_RSP
:
5608 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
/* Fallback for unrecognized LE signaling opcodes. */
5612 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
/* Process an skb received on the LE signaling fixed channel: parse the
 * single command header, sanity-check the length and ident, dispatch to
 * l2cap_le_sig_cmd(), and send L2CAP_COMMAND_REJ back on failure.
 * NOTE(review): extraction has dropped interior lines (early drops,
 * braces) — verify against upstream l2cap_core.c.
 */
5620 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5621 struct sk_buff
*skb
)
5623 struct hci_conn
*hcon
= conn
->hcon
;
5624 struct l2cap_cmd_hdr
*cmd
;
/* LE signaling only makes sense on an LE link. */
5628 if (hcon
->type
!= LE_LINK
)
5631 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5634 cmd
= (void *) skb
->data
;
5635 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5637 len
= le16_to_cpu(cmd
->len
);
5639 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
/* The advertised length must match the remaining payload and the
 * identifier must be non-zero (0 is reserved by the spec).
 */
5641 if (len
!= skb
->len
|| !cmd
->ident
) {
5642 BT_DBG("corrupted command");
5646 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5648 struct l2cap_cmd_rej_unk rej
;
5650 BT_ERR("Wrong link type (%d)", err
);
5652 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5653 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
/* Process an skb received on the BR/EDR signaling channel. Unlike the LE
 * variant, a single skb may carry several commands, so this loops over
 * consecutive command headers, dispatching each to l2cap_bredr_sig_cmd()
 * and answering failures with L2CAP_COMMAND_REJ.
 * NOTE(review): extraction has dropped interior lines — verify against
 * upstream l2cap_core.c.
 */
5661 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5662 struct sk_buff
*skb
)
5664 struct hci_conn
*hcon
= conn
->hcon
;
5665 u8
*data
= skb
->data
;
5667 struct l2cap_cmd_hdr cmd
;
/* Give raw sockets a copy of the unparsed signaling traffic first. */
5670 l2cap_raw_recv(conn
, skb
);
5672 if (hcon
->type
!= ACL_LINK
)
/* Walk every complete command header remaining in the buffer. */
5675 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5677 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5678 data
+= L2CAP_CMD_HDR_SIZE
;
5679 len
-= L2CAP_CMD_HDR_SIZE
;
5681 cmd_len
= le16_to_cpu(cmd
.len
);
5683 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
/* A command claiming more payload than remains, or ident 0, is corrupt. */
5686 if (cmd_len
> len
|| !cmd
.ident
) {
5687 BT_DBG("corrupted command");
5691 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5693 struct l2cap_cmd_rej_unk rej
;
5695 BT_ERR("Wrong link type (%d)", err
);
5697 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5698 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
/* Verify the CRC16 FCS trailer on a received ERTM/streaming frame.
 * The checksum covers the L2CAP header (which sits hdr_size bytes before
 * skb->data) plus the payload; the two FCS bytes are trimmed off the skb.
 * Returns nonzero on mismatch — presumed; the return lines were dropped
 * by extraction, TODO confirm against upstream.
 */
5710 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5712 u16 our_fcs
, rcv_fcs
;
/* Extended control fields imply a larger frame header to checksum. */
5715 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5716 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5718 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5720 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
/* Drop the trailing FCS bytes, then read them from past the new end. */
5721 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5722 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5723 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5725 if (our_fcs
!= rcv_fcs
)
/* Answer a poll (P=1) from the peer: send an RNR if we are locally busy,
 * otherwise flush pending I-frames, and if the F-bit still has not been
 * carried by any frame, send an explicit RR with F=1.
 */
5731 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5733 struct l2cap_ctrl control
;
5735 BT_DBG("chan %p", chan
);
5737 memset(&control
, 0, sizeof(control
));
5740 control
.reqseq
= chan
->buffer_seq
;
/* Mark that the next outgoing frame must carry the F-bit. */
5741 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5743 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5744 control
.super
= L2CAP_SUPER_RNR
;
5745 l2cap_send_sframe(chan
, &control
);
/* Remote just cleared its busy condition: restart retransmissions if
 * frames are still unacknowledged.
 */
5748 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5749 chan
->unacked_frames
> 0)
5750 __set_retrans_timer(chan
);
5752 /* Send pending iframes */
5753 l2cap_ertm_send(chan
);
5755 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5756 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5757 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5760 control
.super
= L2CAP_SUPER_RR
;
5761 l2cap_send_sframe(chan
, &control
);
/* Append new_frag to skb's frag_list, tracking the list tail via
 * *last_frag so appends stay O(1), and keep skb's aggregate accounting
 * (len, data_len, truesize) in sync.
 */
5765 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5766 struct sk_buff
**last_frag
)
5768 /* skb->len reflects data in skb as well as all fragments
5769 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the tail. */
5771 if (!skb_has_frag_list(skb
))
5772 skb_shinfo(skb
)->frag_list
= new_frag
;
5774 new_frag
->next
= NULL
;
5776 (*last_frag
)->next
= new_frag
;
5777 *last_frag
= new_frag
;
5779 skb
->len
+= new_frag
->len
;
5780 skb
->data_len
+= new_frag
->len
;
5781 skb
->truesize
+= new_frag
->truesize
;
/* Reassemble ERTM/streaming SDUs from I-frames according to the SAR bits:
 * unsegmented frames go straight to the channel's recv callback; START
 * frames record the announced sdu_len and begin a fragment chain;
 * CONTINUE/END frames append to it and deliver once complete. The error
 * tail frees a partially-built SDU.
 * NOTE(review): extraction has dropped many interior lines (error returns,
 * braces, the END case label) — verify against upstream l2cap_core.c.
 */
5784 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5785 struct l2cap_ctrl
*control
)
5789 switch (control
->sar
) {
5790 case L2CAP_SAR_UNSEGMENTED
:
/* Whole SDU in one frame: hand it up directly. */
5794 err
= chan
->ops
->recv(chan
, skb
);
5797 case L2CAP_SAR_START
:
/* First fragment carries a 16-bit total-SDU-length prefix. */
5801 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5802 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5804 if (chan
->sdu_len
> chan
->imtu
) {
/* A START fragment must not already contain the full SDU. */
5809 if (skb
->len
>= chan
->sdu_len
)
5813 chan
->sdu_last_frag
= skb
;
5819 case L2CAP_SAR_CONTINUE
:
5823 append_skb_frag(chan
->sdu
, skb
,
5824 &chan
->sdu_last_frag
);
/* A CONTINUE fragment must leave the SDU still incomplete. */
5827 if (chan
->sdu
->len
>= chan
->sdu_len
)
5837 append_skb_frag(chan
->sdu
, skb
,
5838 &chan
->sdu_last_frag
);
/* The final fragment must complete the SDU exactly. */
5841 if (chan
->sdu
->len
!= chan
->sdu_len
)
5844 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5847 /* Reassembly complete */
5849 chan
->sdu_last_frag
= NULL
;
/* Error path: discard any partially reassembled SDU. */
5857 kfree_skb(chan
->sdu
);
5859 chan
->sdu_last_frag
= NULL
;
/* Re-segment outbound data after an AMP channel move changed the usable
 * MTU. NOTE(review): the body was dropped by extraction — in upstream this
 * is a placeholder that returns 0; TODO confirm.
 */
5866 static int l2cap_resegment(struct l2cap_chan
*chan
)
/* Inform the ERTM state machine that the local receive side became busy
 * (busy != 0) or cleared its busy condition (busy == 0). No-op for
 * non-ERTM channels.
 */
5872 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5876 if (chan
->mode
!= L2CAP_MODE_ERTM
)
/* Map the flag to the corresponding local-busy state-machine event. */
5879 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5880 l2cap_tx(chan
, NULL
, NULL
, event
);
/* Drain the SREJ hold queue: repeatedly pull the frame matching the next
 * expected buffer_seq, feed it to l2cap_reassemble_sdu(), and advance.
 * Stops at the first gap or when local busy is set. When the queue empties
 * the channel returns to the normal RECV state and acks the peer.
 */
5883 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5886 /* Pass sequential frames to l2cap_reassemble_sdu()
5887 * until a gap is encountered.
5890 BT_DBG("chan %p", chan
);
5892 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5893 struct sk_buff
*skb
;
5894 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5895 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5897 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5902 skb_unlink(skb
, &chan
->srej_q
);
5903 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5904 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
/* All out-of-order frames consumed: resume normal reception. */
5909 if (skb_queue_empty(&chan
->srej_q
)) {
5910 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5911 l2cap_send_ack(chan
);
/* Handle a received SREJ S-frame: validate reqseq, enforce the per-frame
 * retry limit, and retransmit the single requested frame, honoring the
 * P/F bits and the CONN_SREJ_ACT bookkeeping from the ERTM spec.
 * NOTE(review): extraction has dropped interior lines (disconnect on
 * missing frame, braces) — verify against upstream l2cap_core.c.
 */
5917 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5918 struct l2cap_ctrl
*control
)
5920 struct sk_buff
*skb
;
5922 BT_DBG("chan %p, control %p", chan
, control
);
/* An SREJ for the next unsent sequence number is a protocol violation. */
5924 if (control
->reqseq
== chan
->next_tx_seq
) {
5925 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5926 l2cap_send_disconn_req(chan
, ECONNRESET
);
5930 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5933 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means unlimited retries; otherwise enforce the limit. */
5938 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5939 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5940 l2cap_send_disconn_req(chan
, ECONNRESET
);
5944 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
/* P=1: retransmit with an F-bit response and remember the reqseq so a
 * later F=1 duplicate can be recognized (CONN_SREJ_ACT).
 */
5946 if (control
->poll
) {
5947 l2cap_pass_to_tx(chan
, control
);
5949 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5950 l2cap_retransmit(chan
, control
);
5951 l2cap_ertm_send(chan
);
5953 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5954 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5955 chan
->srej_save_reqseq
= control
->reqseq
;
5958 l2cap_pass_to_tx_fbit(chan
, control
);
/* F=1: only retransmit if this is not the SREJ we already acted on. */
5960 if (control
->final
) {
5961 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5962 !test_and_clear_bit(CONN_SREJ_ACT
,
5964 l2cap_retransmit(chan
, control
);
5966 l2cap_retransmit(chan
, control
);
5967 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5968 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5969 chan
->srej_save_reqseq
= control
->reqseq
;
/* Handle a received REJ S-frame: validate reqseq, enforce the retry
 * limit, and retransmit all unacknowledged frames starting at reqseq,
 * with CONN_REJ_ACT preventing a duplicate retransmission when both a
 * REJ and an F=1 response arrive for the same event.
 * NOTE(review): extraction has dropped interior lines — verify against
 * upstream l2cap_core.c.
 */
5975 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5976 struct l2cap_ctrl
*control
)
5978 struct sk_buff
*skb
;
5980 BT_DBG("chan %p, control %p", chan
, control
);
/* A REJ for the next unsent sequence number is a protocol violation. */
5982 if (control
->reqseq
== chan
->next_tx_seq
) {
5983 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5984 l2cap_send_disconn_req(chan
, ECONNRESET
);
5988 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5990 if (chan
->max_tx
&& skb
&&
5991 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5992 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5993 l2cap_send_disconn_req(chan
, ECONNRESET
);
5997 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5999 l2cap_pass_to_tx(chan
, control
);
/* F=1: retransmit only if a REJ was not already acted upon. */
6001 if (control
->final
) {
6002 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
6003 l2cap_retransmit_all(chan
, control
);
6005 l2cap_retransmit_all(chan
, control
);
6006 l2cap_ertm_send(chan
);
6007 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
6008 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
/* Classify an incoming I-frame's txseq relative to the receive window:
 * expected, duplicate, unexpected (gap), or invalid — with separate SREJ
 * variants while in the SREJ_SENT recovery state. Invalid frames inside a
 * small tx window can be silently ignored; with a large window they force
 * a disconnect (see the "double poll" commentary below).
 */
6012 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
6014 BT_DBG("chan %p, txseq %d", chan
, txseq
)
;
6016 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
6017 chan
->expected_tx_seq
);
/* Special handling while SREJ recovery is in progress. */
6019 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
6020 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6022 /* See notes below regarding "double poll" and
6025 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6026 BT_DBG("Invalid/Ignore - after SREJ");
6027 return L2CAP_TXSEQ_INVALID_IGNORE
;
6029 BT_DBG("Invalid - in window after SREJ sent");
6030 return L2CAP_TXSEQ_INVALID
;
/* Head of the SREJ list: this is the retransmission we asked for. */
6034 if (chan
->srej_list
.head
== txseq
) {
6035 BT_DBG("Expected SREJ");
6036 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6039 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6040 BT_DBG("Duplicate SREJ - txseq already stored");
6041 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6044 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6045 BT_DBG("Unexpected SREJ - not requested");
6046 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
/* Normal-path classification. */
6050 if (chan
->expected_tx_seq
== txseq
) {
6051 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6053 BT_DBG("Invalid - txseq outside tx window");
6054 return L2CAP_TXSEQ_INVALID
;
6057 return L2CAP_TXSEQ_EXPECTED
;
/* txseq earlier than expected_tx_seq: already received. */
6061 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6062 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6063 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6064 return L2CAP_TXSEQ_DUPLICATE
;
6067 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6068 /* A source of invalid packets is a "double poll" condition,
6069 * where delays cause us to send multiple poll packets. If
6070 * the remote stack receives and processes both polls,
6071 * sequence numbers can wrap around in such a way that a
6072 * resent frame has a sequence number that looks like new data
6073 * with a sequence gap. This would trigger an erroneous SREJ
6076 * Fortunately, this is impossible with a tx window that's
6077 * less than half of the maximum sequence number, which allows
6078 * invalid frames to be safely ignored.
6080 * With tx window sizes greater than half of the tx window
6081 * maximum, the frame is invalid and cannot be ignored. This
6082 * causes a disconnect.
6085 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6086 BT_DBG("Invalid/Ignore - txseq outside tx window");
6087 return L2CAP_TXSEQ_INVALID_IGNORE
;
6089 BT_DBG("Invalid - txseq outside tx window");
6090 return L2CAP_TXSEQ_INVALID
;
/* In-window but ahead of expected: a gap, frames were missed. */
6093 BT_DBG("Unexpected - txseq indicates missing frames");
6094 return L2CAP_TXSEQ_UNEXPECTED
;
/* ERTM receive state machine, normal RECV state: process I-frames by
 * txseq classification (deliver, queue with SREJ on a gap, or disconnect
 * on invalid) and handle RR/RNR/REJ/SREJ supervisory events. Frames not
 * consumed (skb_in_use stays false) are freed at the end.
 * NOTE(review): extraction has dropped interior lines (breaks, frees,
 * returns) — verify against upstream l2cap_core.c.
 */
6098 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6099 struct l2cap_ctrl
*control
,
6100 struct sk_buff
*skb
, u8 event
)
6103 bool skb_in_use
= false;
6105 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6109 case L2CAP_EV_RECV_IFRAME
:
6110 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6111 case L2CAP_TXSEQ_EXPECTED
:
6112 l2cap_pass_to_tx(chan
, control
);
/* While locally busy we drop even the expected frame; the peer will
 * retransmit it after busy clears.
 */
6114 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6115 BT_DBG("Busy, discarding expected seq %d",
6120 chan
->expected_tx_seq
= __next_seq(chan
,
6123 chan
->buffer_seq
= chan
->expected_tx_seq
;
6126 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6130 if (control
->final
) {
6131 if (!test_and_clear_bit(CONN_REJ_ACT
,
6132 &chan
->conn_state
)) {
6134 l2cap_retransmit_all(chan
, control
);
6135 l2cap_ertm_send(chan
);
6139 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6140 l2cap_send_ack(chan
);
6142 case L2CAP_TXSEQ_UNEXPECTED
:
6143 l2cap_pass_to_tx(chan
, control
);
6145 /* Can't issue SREJ frames in the local busy state.
6146 * Drop this frame, it will be seen as missing
6147 * when local busy is exited.
6149 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6150 BT_DBG("Busy, discarding unexpected seq %d",
6155 /* There was a gap in the sequence, so an SREJ
6156 * must be sent for each missing frame. The
6157 * current frame is stored for later use.
6159 skb_queue_tail(&chan
->srej_q
, skb
);
6161 BT_DBG("Queued %p (queue len %d)", skb
,
6162 skb_queue_len(&chan
->srej_q
));
6164 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6165 l2cap_seq_list_clear(&chan
->srej_list
);
6166 l2cap_send_srej(chan
, control
->txseq
);
6168 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6170 case L2CAP_TXSEQ_DUPLICATE
:
6171 l2cap_pass_to_tx(chan
, control
);
6173 case L2CAP_TXSEQ_INVALID_IGNORE
:
6175 case L2CAP_TXSEQ_INVALID
:
6177 l2cap_send_disconn_req(chan
, ECONNRESET
);
6181 case L2CAP_EV_RECV_RR
:
6182 l2cap_pass_to_tx(chan
, control
);
6183 if (control
->final
) {
6184 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6186 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6187 !__chan_is_moving(chan
)) {
6189 l2cap_retransmit_all(chan
, control
);
6192 l2cap_ertm_send(chan
);
6193 } else if (control
->poll
) {
6194 l2cap_send_i_or_rr_or_rnr(chan
);
6196 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6197 &chan
->conn_state
) &&
6198 chan
->unacked_frames
)
6199 __set_retrans_timer(chan
);
6201 l2cap_ertm_send(chan
);
6204 case L2CAP_EV_RECV_RNR
:
6205 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6206 l2cap_pass_to_tx(chan
, control
);
6207 if (control
&& control
->poll
) {
6208 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6209 l2cap_send_rr_or_rnr(chan
, 0);
/* Remote busy: stop retransmitting until it clears. */
6211 __clear_retrans_timer(chan
);
6212 l2cap_seq_list_clear(&chan
->retrans_list
);
6214 case L2CAP_EV_RECV_REJ
:
6215 l2cap_handle_rej(chan
, control
);
6217 case L2CAP_EV_RECV_SREJ
:
6218 l2cap_handle_srej(chan
, control
);
/* Free any skb that was not queued or consumed above. */
6224 if (skb
&& !skb_in_use
) {
6225 BT_DBG("Freeing %p", skb
);
/* ERTM receive state machine, SREJ_SENT state: we have outstanding
 * selective-reject requests, so most incoming I-frames are parked on
 * srej_q; requested retransmissions advance reassembly via
 * l2cap_rx_queued_iframes(). Supervisory frames are handled largely as in
 * RECV, plus srej-tail housekeeping.
 * NOTE(review): extraction has dropped interior lines (breaks, frees,
 * skb_in_use assignments) — verify against upstream l2cap_core.c.
 */
6232 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6233 struct l2cap_ctrl
*control
,
6234 struct sk_buff
*skb
, u8 event
)
6237 u16 txseq
= control
->txseq
;
6238 bool skb_in_use
= false;
6240 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6244 case L2CAP_EV_RECV_IFRAME
:
6245 switch (l2cap_classify_txseq(chan
, txseq
)) {
6246 case L2CAP_TXSEQ_EXPECTED
:
6247 /* Keep frame for reassembly later */
6248 l2cap_pass_to_tx(chan
, control
);
6249 skb_queue_tail(&chan
->srej_q
, skb
);
6251 BT_DBG("Queued %p (queue len %d)", skb
,
6252 skb_queue_len(&chan
->srej_q
));
6254 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6256 case L2CAP_TXSEQ_EXPECTED_SREJ
:
/* This is the retransmission we asked for: drop it from the SREJ
 * list and try to flush the hold queue in order.
 */
6257 l2cap_seq_list_pop(&chan
->srej_list
);
6259 l2cap_pass_to_tx(chan
, control
);
6260 skb_queue_tail(&chan
->srej_q
, skb
);
6262 BT_DBG("Queued %p (queue len %d)", skb
,
6263 skb_queue_len(&chan
->srej_q
));
6265 err
= l2cap_rx_queued_iframes(chan
);
6270 case L2CAP_TXSEQ_UNEXPECTED
:
6271 /* Got a frame that can't be reassembled yet.
6272 * Save it for later, and send SREJs to cover
6273 * the missing frames.
6275 skb_queue_tail(&chan
->srej_q
, skb
);
6277 BT_DBG("Queued %p (queue len %d)", skb
,
6278 skb_queue_len(&chan
->srej_q
));
6280 l2cap_pass_to_tx(chan
, control
);
6281 l2cap_send_srej(chan
, control
->txseq
);
6283 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6284 /* This frame was requested with an SREJ, but
6285 * some expected retransmitted frames are
6286 * missing. Request retransmission of missing
6289 skb_queue_tail(&chan
->srej_q
, skb
);
6291 BT_DBG("Queued %p (queue len %d)", skb
,
6292 skb_queue_len(&chan
->srej_q
));
6294 l2cap_pass_to_tx(chan
, control
);
6295 l2cap_send_srej_list(chan
, control
->txseq
);
6297 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6298 /* We've already queued this frame. Drop this copy. */
6299 l2cap_pass_to_tx(chan
, control
);
6301 case L2CAP_TXSEQ_DUPLICATE
:
6302 /* Expecting a later sequence number, so this frame
6303 * was already received. Ignore it completely.
6306 case L2CAP_TXSEQ_INVALID_IGNORE
:
6308 case L2CAP_TXSEQ_INVALID
:
6310 l2cap_send_disconn_req(chan
, ECONNRESET
);
6314 case L2CAP_EV_RECV_RR
:
6315 l2cap_pass_to_tx(chan
, control
);
6316 if (control
->final
) {
6317 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6319 if (!test_and_clear_bit(CONN_REJ_ACT
,
6320 &chan
->conn_state
)) {
6322 l2cap_retransmit_all(chan
, control
);
6325 l2cap_ertm_send(chan
);
6326 } else if (control
->poll
) {
6327 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6328 &chan
->conn_state
) &&
6329 chan
->unacked_frames
) {
6330 __set_retrans_timer(chan
);
/* Answer the poll by re-requesting the tail of the SREJ list. */
6333 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6334 l2cap_send_srej_tail(chan
);
6336 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6337 &chan
->conn_state
) &&
6338 chan
->unacked_frames
)
6339 __set_retrans_timer(chan
);
6341 l2cap_send_ack(chan
);
6344 case L2CAP_EV_RECV_RNR
:
6345 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6346 l2cap_pass_to_tx(chan
, control
);
6347 if (control
->poll
) {
6348 l2cap_send_srej_tail(chan
);
/* Non-poll RNR: acknowledge with a plain RR instead. */
6350 struct l2cap_ctrl rr_control
;
6351 memset(&rr_control
, 0, sizeof(rr_control
));
6352 rr_control
.sframe
= 1;
6353 rr_control
.super
= L2CAP_SUPER_RR
;
6354 rr_control
.reqseq
= chan
->buffer_seq
;
6355 l2cap_send_sframe(chan
, &rr_control
);
6359 case L2CAP_EV_RECV_REJ
:
6360 l2cap_handle_rej(chan
, control
);
6362 case L2CAP_EV_RECV_SREJ
:
6363 l2cap_handle_srej(chan
, control
);
/* Free any skb that was not queued above. */
6367 if (skb
&& !skb_in_use
) {
6368 BT_DBG("Freeing %p", skb
);
/* Complete an AMP channel move: return to the RECV state, adopt the MTU
 * of the link now carrying the channel (high-speed block MTU if an hs_hcon
 * exists, otherwise the ACL MTU), and re-segment pending data.
 * NOTE(review): the hs_hcon conditional's if/else lines were dropped by
 * extraction — verify against upstream l2cap_core.c.
 */
6375 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6377 BT_DBG("chan %p", chan
);
6379 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6382 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6384 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6386 return l2cap_resegment(chan
);
/* ERTM receive state machine, WAIT_P state (used during a channel move):
 * on receiving the peer's poll, process its reqseq, rewind next_tx_seq to
 * the acknowledged point, finish the move, and answer the poll. Non-poll
 * events fall through to the normal RECV handler without the skb.
 */
6389 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6390 struct l2cap_ctrl
*control
,
6391 struct sk_buff
*skb
, u8 event
)
6395 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6401 l2cap_process_reqseq(chan
, control
->reqseq
);
/* Reset the send pointer to the head of whatever is still queued. */
6403 if (!skb_queue_empty(&chan
->tx_q
))
6404 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6406 chan
->tx_send_head
= NULL
;
6408 /* Rewind next_tx_seq to the point expected
6411 chan
->next_tx_seq
= control
->reqseq
;
6412 chan
->unacked_frames
= 0;
6414 err
= l2cap_finish_move(chan
);
6418 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6419 l2cap_send_i_or_rr_or_rnr(chan
);
/* I-frames received while waiting for the poll are not processed here. */
6421 if (event
== L2CAP_EV_RECV_IFRAME
)
6424 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
/* ERTM receive state machine, WAIT_F state (channel move): wait for the
 * peer's final (F=1) response, then rewind the transmit side to the
 * acknowledged reqseq, adopt the new link MTU, re-segment, and process the
 * triggering event through the normal RECV handler.
 * NOTE(review): extraction has dropped interior lines (hs_hcon if/else,
 * returns) — verify against upstream l2cap_core.c.
 */
6427 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6428 struct l2cap_ctrl
*control
,
6429 struct sk_buff
*skb
, u8 event
)
/* Ignore everything until a frame with the F-bit arrives. */
6433 if (!control
->final
)
6436 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6438 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6439 l2cap_process_reqseq(chan
, control
->reqseq
);
6441 if (!skb_queue_empty(&chan
->tx_q
))
6442 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6444 chan
->tx_send_head
= NULL
;
6446 /* Rewind next_tx_seq to the point expected
6449 chan
->next_tx_seq
= control
->reqseq
;
6450 chan
->unacked_frames
= 0;
6453 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6455 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6457 err
= l2cap_resegment(chan
);
6460 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
/* Validate a received reqseq: it may only acknowledge frames that have
 * been sent but not yet acked, i.e. it must lie between expected_ack_seq
 * and next_tx_seq in modular sequence-space.
 */
6465 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6467 /* Make sure reqseq is for a packet that has been sent but not acked */
6470 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6471 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
/* Top-level ERTM receive dispatcher: after validating reqseq, route the
 * event to the handler for the channel's current rx_state. An invalid
 * reqseq is a protocol violation and disconnects the channel.
 */
6474 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6475 struct sk_buff
*skb
, u8 event
)
6479 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6480 control
, skb
, event
, chan
->rx_state
);
6482 if (__valid_reqseq(chan
, control
->reqseq
)) {
6483 switch (chan
->rx_state
) {
6484 case L2CAP_RX_STATE_RECV
:
6485 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6487 case L2CAP_RX_STATE_SREJ_SENT
:
6488 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6491 case L2CAP_RX_STATE_WAIT_P
:
6492 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6494 case L2CAP_RX_STATE_WAIT_F
:
6495 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
/* reqseq acknowledges a frame we never sent: disconnect. */
6502 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6503 control
->reqseq
, chan
->next_tx_seq
,
6504 chan
->expected_ack_seq
);
6505 l2cap_send_disconn_req(chan
, ECONNRESET
);
/* Streaming-mode receive: deliver the frame if its txseq is the expected
 * one, otherwise drop it (streaming mode has no retransmission); on a
 * reassembly failure the partial SDU is discarded. Sequence counters are
 * always advanced to the received txseq.
 * NOTE(review): extraction has dropped interior lines (kfree_skb of the
 * unexpected frame, braces) — verify against upstream l2cap_core.c.
 */
6511 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6512 struct sk_buff
*skb
)
6516 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6519 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6520 L2CAP_TXSEQ_EXPECTED
) {
6521 l2cap_pass_to_tx(chan
, control
);
6523 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6524 __next_seq(chan
, chan
->buffer_seq
));
6526 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6528 l2cap_reassemble_sdu(chan
, skb
, control
);
/* Reassembly failed: drop any partially built SDU. */
6531 kfree_skb(chan
->sdu
);
6534 chan
->sdu_last_frag
= NULL
;
6538 BT_DBG("Freeing %p", skb
);
/* Track the peer's sequence numbers even for dropped frames. */
6543 chan
->last_acked_seq
= control
->txseq
;
6544 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify FCS, validate payload length and F/P bit combinations, then route
 * I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames to the event
 * mapped from their supervisory function.
 * NOTE(review): extraction has dropped interior lines (drops, braces,
 * returns) — verify against upstream l2cap_core.c.
 */
6549 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6551 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6555 __unpack_control(chan
, skb
);
6560 * We can just drop the corrupted I-frame here.
6561 * Receiver will miss it and start proper recovery
6562 * procedures and ask for retransmission.
6564 if (l2cap_check_fcs(chan
, skb
))
/* Account for the SDU-length prefix on SAR_START I-frames and for the
 * FCS trailer when computing the effective payload length.
 */
6567 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6568 len
-= L2CAP_SDULEN_SIZE
;
6570 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6571 len
-= L2CAP_FCS_SIZE
;
/* A payload larger than the negotiated MPS is a protocol violation. */
6573 if (len
> chan
->mps
) {
6574 l2cap_send_disconn_req(chan
, ECONNRESET
);
6578 if (!control
->sframe
) {
6581 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6582 control
->sar
, control
->reqseq
, control
->final
,
6585 /* Validate F-bit - F=0 always valid, F=1 only
6586 * valid in TX WAIT_F
6588 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6591 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6592 event
= L2CAP_EV_RECV_IFRAME
;
6593 err
= l2cap_rx(chan
, control
, skb
, event
);
6595 err
= l2cap_stream_rx(chan
, control
, skb
);
6599 l2cap_send_disconn_req(chan
, ECONNRESET
);
/* Map an S-frame's 2-bit supervisory function to a state-machine event. */
6601 const u8 rx_func_to_event
[4] = {
6602 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6603 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6606 /* Only I-frames are expected in streaming mode */
6607 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6610 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6611 control
->reqseq
, control
->final
, control
->poll
,
/* S-frames must carry no payload. */
6615 BT_ERR("Trailing bytes: %d in sframe", len
);
6616 l2cap_send_disconn_req(chan
, ECONNRESET
);
6620 /* Validate F and P bits */
6621 if (control
->final
&& (control
->poll
||
6622 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6625 event
= rx_func_to_event
[control
->super
];
6626 if (l2cap_rx(chan
, control
, skb
, event
))
6627 l2cap_send_disconn_req(chan
, ECONNRESET
);
/* Replenish the peer's LE flow-control credits: once our rx_credits drop
 * below half of le_max_credits, top the count back up to le_max_credits
 * and send the difference in an L2CAP_LE_CREDITS packet.
 */
6637 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6639 struct l2cap_conn
*conn
= chan
->conn
;
6640 struct l2cap_le_credits pkt
;
6643 /* We return more credits to the sender only after the amount of
6644 * credits falls below half of the initial amount.
6646 if (chan
->rx_credits
>= (le_max_credits
+ 1) / 2)
6649 return_credits
= le_max_credits
- chan
->rx_credits
;
6651 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6653 chan
->rx_credits
+= return_credits
;
6655 pkt
.cid
= cpu_to_le16(chan
->scid
);
6656 pkt
.credits
= cpu_to_le16(return_credits
);
6658 chan
->ident
= l2cap_get_ident(conn
);
6660 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
/* Receive one LE CoC data PDU: consume a receive credit, validate sizes
 * against the negotiated MTU/announced SDU length, and either deliver the
 * SDU directly (single-PDU case) or accumulate fragments on chan->sdu
 * until sdu_len bytes arrive. Protocol violations disconnect the channel;
 * errors are swallowed after the skb has been consumed internally.
 * NOTE(review): extraction has dropped interior lines (drops, error
 * labels, braces) — verify against upstream l2cap_core.c.
 */
6663 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
/* The peer must not send without credits — that is a violation. */
6667 if (!chan
->rx_credits
) {
6668 BT_ERR("No credits to receive LE L2CAP data");
6669 l2cap_send_disconn_req(chan
, ECONNRESET
);
6673 if (chan
->imtu
< skb
->len
) {
6674 BT_ERR("Too big LE L2CAP PDU");
6679 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6681 l2cap_chan_le_send_credits(chan
);
/* First PDU of an SDU carries a 16-bit total-length prefix. */
6688 sdu_len
= get_unaligned_le16(skb
->data
);
6689 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6691 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6692 sdu_len
, skb
->len
, chan
->imtu
);
6694 if (sdu_len
> chan
->imtu
) {
6695 BT_ERR("Too big LE L2CAP SDU length received");
6700 if (skb
->len
> sdu_len
) {
6701 BT_ERR("Too much LE L2CAP data received");
/* Entire SDU in one PDU: deliver immediately. */
6706 if (skb
->len
== sdu_len
)
6707 return chan
->ops
->recv(chan
, skb
);
6710 chan
->sdu_len
= sdu_len
;
6711 chan
->sdu_last_frag
= skb
;
/* Continuation PDU: append to the SDU under reassembly. */
6716 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6717 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6719 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6720 BT_ERR("Too much LE L2CAP data received");
6725 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6728 if (chan
->sdu
->len
== chan
->sdu_len
) {
6729 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6732 chan
->sdu_last_frag
= NULL
;
/* Failure path: discard the partially reassembled SDU. */
6740 kfree_skb(chan
->sdu
);
6742 chan
->sdu_last_frag
= NULL
;
6746 /* We can't return an error here since we took care of the skb
6747 * freeing internally. An error return would cause the caller to
6748 * do a double-free of the skb.
/* Route an skb received on a dynamic (connection-oriented) CID to its
 * channel and dispatch by channel mode: LE flow control, basic, or
 * ERTM/streaming. Unknown CIDs (other than the A2MP create path) and
 * frames for non-connected channels are dropped.
 * NOTE(review): extraction has dropped interior lines (drop labels,
 * breaks) — verify against upstream l2cap_core.c.
 */
6753 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6754 struct sk_buff
*skb
)
6756 struct l2cap_chan
*chan
;
6758 chan
= l2cap_get_chan_by_scid(conn
, cid
);
/* No channel yet for the A2MP CID: create one on demand. */
6760 if (cid
== L2CAP_CID_A2MP
) {
6761 chan
= a2mp_channel_create(conn
, skb
);
6767 l2cap_chan_lock(chan
);
6769 BT_DBG("unknown cid 0x%4.4x", cid
);
6770 /* Drop packet and return */
6776 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6778 if (chan
->state
!= BT_CONNECTED
)
6781 switch (chan
->mode
) {
6782 case L2CAP_MODE_LE_FLOWCTL
:
6783 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6788 case L2CAP_MODE_BASIC
:
6789 /* If socket recv buffers overflows we drop data here
6790 * which is *bad* because L2CAP has to be reliable.
6791 * But we don't have any other choice. L2CAP doesn't
6792 * provide flow control mechanism. */
6794 if (chan
->imtu
< skb
->len
) {
6795 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6799 if (!chan
->ops
->recv(chan
, skb
))
6803 case L2CAP_MODE_ERTM
:
6804 case L2CAP_MODE_STREAMING
:
6805 l2cap_data_rcv(chan
, skb
);
6809 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6817 l2cap_chan_unlock(chan
);
/* Deliver a connectionless (CID 0x0002) datagram: look up a matching
 * PSM channel on this ACL link, check its state and MTU, stash the
 * sender's address/PSM in the skb control block for recvmsg's msg_name,
 * and pass the skb up.
 * NOTE(review): extraction has dropped interior lines (drop label,
 * kfree_skb) — verify against upstream l2cap_core.c.
 */
6820 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6821 struct sk_buff
*skb
)
6823 struct hci_conn
*hcon
= conn
->hcon
;
6824 struct l2cap_chan
*chan
;
/* Connectionless data is only defined for BR/EDR links. */
6826 if (hcon
->type
!= ACL_LINK
)
6829 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6834 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6836 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6839 if (chan
->imtu
< skb
->len
)
6842 /* Store remote BD_ADDR and PSM for msg_name */
6843 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
6844 bt_cb(skb
)->psm
= psm
;
6846 if (!chan
->ops
->recv(chan
, skb
)) {
6847 l2cap_chan_put(chan
);
6852 l2cap_chan_put(chan
);
/* Top-level L2CAP frame demultiplexer: queue frames that arrive before
 * the HCI connection is fully up, validate the basic header length,
 * filter blacklisted LE peers, then dispatch by CID to the signaling,
 * connectionless, LE-signaling, or data-channel handlers.
 * NOTE(review): extraction has dropped interior lines (drops, breaks,
 * default label) — verify against upstream l2cap_core.c.
 */
6857 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6859 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6860 struct hci_conn
*hcon
= conn
->hcon
;
/* Defer processing until the link reaches BT_CONNECTED. */
6864 if (hcon
->state
!= BT_CONNECTED
) {
6865 BT_DBG("queueing pending rx skb");
6866 skb_queue_tail(&conn
->pending_rx
, skb
);
6870 skb_pull(skb
, L2CAP_HDR_SIZE
);
6871 cid
= __le16_to_cpu(lh
->cid
);
6872 len
= __le16_to_cpu(lh
->len
);
/* Header length must match the actual payload exactly. */
6874 if (len
!= skb
->len
) {
6879 /* Since we can't actively block incoming LE connections we must
6880 * at least ensure that we ignore incoming data from them.
6882 if (hcon
->type
== LE_LINK
&&
6883 hci_bdaddr_list_lookup(&hcon
->hdev
->blacklist
, &hcon
->dst
,
6884 bdaddr_type(hcon
, hcon
->dst_type
))) {
6889 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6892 case L2CAP_CID_SIGNALING
:
6893 l2cap_sig_channel(conn
, skb
);
6896 case L2CAP_CID_CONN_LESS
:
/* Connectionless frames carry a 2-byte PSM prefix before the data. */
6897 psm
= get_unaligned((__le16
*) skb
->data
);
6898 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6899 l2cap_conless_channel(conn
, psm
, skb
);
6902 case L2CAP_CID_LE_SIGNALING
:
6903 l2cap_le_sig_channel(conn
, skb
);
6907 l2cap_data_channel(conn
, cid
, skb
);
/* Workqueue handler: flush frames that were queued while the HCI
 * connection was still being established, replaying each through
 * l2cap_recv_frame().
 */
6912 static void process_pending_rx(struct work_struct
*work
)
6914 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
6916 struct sk_buff
*skb
;
6920 while ((skb
= skb_dequeue(&conn
->pending_rx
)))
6921 l2cap_recv_frame(conn
, skb
);
/* Create and initialize the l2cap_conn object for an HCI connection:
 * allocate, take references on the hcon and an hci_chan, pick the MTU
 * from the controller (LE MTU for LE links when set, else ACL MTU),
 * advertise the locally supported fixed channels, and set up locks,
 * lists, the info timer, and the pending-rx machinery.
 * NOTE(review): extraction has dropped interior lines (early return when
 * conn already exists, NULL checks, braces) — verify against upstream
 * l2cap_core.c.
 */
6924 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
6926 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6927 struct hci_chan
*hchan
;
6932 hchan
= hci_chan_create(hcon
);
6936 conn
= kzalloc(sizeof(*conn
), GFP_KERNEL
);
/* Allocation failed: release the hci_chan taken above. */
6938 hci_chan_del(hchan
);
6942 kref_init(&conn
->ref
);
6943 hcon
->l2cap_data
= conn
;
6944 conn
->hcon
= hci_conn_get(hcon
);
6945 conn
->hchan
= hchan
;
6947 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
/* Choose the connection MTU based on link type and controller caps. */
6949 switch (hcon
->type
) {
6951 if (hcon
->hdev
->le_mtu
) {
6952 conn
->mtu
= hcon
->hdev
->le_mtu
;
6957 conn
->mtu
= hcon
->hdev
->acl_mtu
;
6961 conn
->feat_mask
= 0;
6963 conn
->local_fixed_chan
= L2CAP_FC_SIG_BREDR
| L2CAP_FC_CONNLESS
;
/* Advertise A2MP only on BR/EDR links with high-speed enabled. */
6965 if (hcon
->type
== ACL_LINK
&&
6966 test_bit(HCI_HS_ENABLED
, &hcon
->hdev
->dev_flags
))
6967 conn
->local_fixed_chan
|= L2CAP_FC_A2MP
;
/* Advertise BR/EDR SMP only with secure connections and LE enabled. */
6969 if (bredr_sc_enabled(hcon
->hdev
) &&
6970 test_bit(HCI_LE_ENABLED
, &hcon
->hdev
->dev_flags
))
6971 conn
->local_fixed_chan
|= L2CAP_FC_SMP_BREDR
;
6973 mutex_init(&conn
->ident_lock
);
6974 mutex_init(&conn
->chan_lock
);
6976 INIT_LIST_HEAD(&conn
->chan_l
);
6977 INIT_LIST_HEAD(&conn
->users
);
6979 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
6981 skb_queue_head_init(&conn
->pending_rx
);
6982 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
6983 INIT_WORK(&conn
->id_addr_update_work
, l2cap_conn_update_id_addr
);
6985 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
6990 static bool is_valid_psm(u16 psm
, u8 dst_type
) {
6994 if (bdaddr_type_is_le(dst_type
))
6995 return (psm
<= 0x00ff);
6997 /* PSM must be odd and lsb of upper byte must be 0 */
6998 return ((psm
& 0x0101) == 0x0001);
7001 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
7002 bdaddr_t
*dst
, u8 dst_type
)
7004 struct l2cap_conn
*conn
;
7005 struct hci_conn
*hcon
;
7006 struct hci_dev
*hdev
;
7009 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
7010 dst_type
, __le16_to_cpu(psm
));
7012 hdev
= hci_get_route(dst
, &chan
->src
);
7014 return -EHOSTUNREACH
;
7018 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
7019 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
7024 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
7029 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
7034 switch (chan
->mode
) {
7035 case L2CAP_MODE_BASIC
:
7037 case L2CAP_MODE_LE_FLOWCTL
:
7038 l2cap_le_flowctl_init(chan
);
7040 case L2CAP_MODE_ERTM
:
7041 case L2CAP_MODE_STREAMING
:
7050 switch (chan
->state
) {
7054 /* Already connecting */
7059 /* Already connected */
7073 /* Set destination address and psm */
7074 bacpy(&chan
->dst
, dst
);
7075 chan
->dst_type
= dst_type
;
7080 if (bdaddr_type_is_le(dst_type
)) {
7083 /* Convert from L2CAP channel address type to HCI address type
7085 if (dst_type
== BDADDR_LE_PUBLIC
)
7086 dst_type
= ADDR_LE_DEV_PUBLIC
;
7088 dst_type
= ADDR_LE_DEV_RANDOM
;
7090 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
7091 role
= HCI_ROLE_SLAVE
;
7093 role
= HCI_ROLE_MASTER
;
7095 hcon
= hci_connect_le(hdev
, dst
, dst_type
, chan
->sec_level
,
7096 HCI_LE_CONN_TIMEOUT
, role
);
7098 u8 auth_type
= l2cap_get_auth_type(chan
);
7099 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7103 err
= PTR_ERR(hcon
);
7107 conn
= l2cap_conn_add(hcon
);
7109 hci_conn_drop(hcon
);
7114 mutex_lock(&conn
->chan_lock
);
7115 l2cap_chan_lock(chan
);
7117 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7118 hci_conn_drop(hcon
);
7123 /* Update source addr of the socket */
7124 bacpy(&chan
->src
, &hcon
->src
);
7125 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7127 __l2cap_chan_add(conn
, chan
);
7129 /* l2cap_chan_add takes its own ref so we can drop this one */
7130 hci_conn_drop(hcon
);
7132 l2cap_state_change(chan
, BT_CONNECT
);
7133 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7135 /* Release chan->sport so that it can be reused by other
7136 * sockets (as it's only used for listening sockets).
7138 write_lock(&chan_list_lock
);
7140 write_unlock(&chan_list_lock
);
7142 if (hcon
->state
== BT_CONNECTED
) {
7143 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7144 __clear_chan_timer(chan
);
7145 if (l2cap_chan_check_security(chan
, true))
7146 l2cap_state_change(chan
, BT_CONNECTED
);
7148 l2cap_do_start(chan
);
7154 l2cap_chan_unlock(chan
);
7155 mutex_unlock(&conn
->chan_lock
);
7157 hci_dev_unlock(hdev
);
7161 EXPORT_SYMBOL_GPL(l2cap_chan_connect
);
7163 /* ---- L2CAP interface with lower layer (HCI) ---- */
7165 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7167 int exact
= 0, lm1
= 0, lm2
= 0;
7168 struct l2cap_chan
*c
;
7170 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7172 /* Find listening sockets and check their link_mode */
7173 read_lock(&chan_list_lock
);
7174 list_for_each_entry(c
, &chan_list
, global_l
) {
7175 if (c
->state
!= BT_LISTEN
)
7178 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7179 lm1
|= HCI_LM_ACCEPT
;
7180 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7181 lm1
|= HCI_LM_MASTER
;
7183 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7184 lm2
|= HCI_LM_ACCEPT
;
7185 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7186 lm2
|= HCI_LM_MASTER
;
7189 read_unlock(&chan_list_lock
);
7191 return exact
? lm1
: lm2
;
7194 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7195 * from an existing channel in the list or from the beginning of the
7196 * global list (by passing NULL as first parameter).
7198 static struct l2cap_chan
*l2cap_global_fixed_chan(struct l2cap_chan
*c
,
7199 bdaddr_t
*src
, u8 link_type
)
7201 read_lock(&chan_list_lock
);
7204 c
= list_next_entry(c
, global_l
);
7206 c
= list_entry(chan_list
.next
, typeof(*c
), global_l
);
7208 list_for_each_entry_from(c
, &chan_list
, global_l
) {
7209 if (c
->chan_type
!= L2CAP_CHAN_FIXED
)
7211 if (c
->state
!= BT_LISTEN
)
7213 if (bacmp(&c
->src
, src
) && bacmp(&c
->src
, BDADDR_ANY
))
7215 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
7217 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
7221 read_unlock(&chan_list_lock
);
7225 read_unlock(&chan_list_lock
);
7230 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7232 struct hci_dev
*hdev
= hcon
->hdev
;
7233 struct l2cap_conn
*conn
;
7234 struct l2cap_chan
*pchan
;
7237 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7240 l2cap_conn_del(hcon
, bt_to_errno(status
));
7244 conn
= l2cap_conn_add(hcon
);
7248 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
7250 /* If device is blocked, do not create channels for it */
7251 if (hci_bdaddr_list_lookup(&hdev
->blacklist
, &hcon
->dst
, dst_type
))
7254 /* Find fixed channels and notify them of the new connection. We
7255 * use multiple individual lookups, continuing each time where
7256 * we left off, because the list lock would prevent calling the
7257 * potentially sleeping l2cap_chan_lock() function.
7259 pchan
= l2cap_global_fixed_chan(NULL
, &hdev
->bdaddr
, hcon
->type
);
7261 struct l2cap_chan
*chan
, *next
;
7263 /* Client fixed channels should override server ones */
7264 if (__l2cap_get_chan_by_dcid(conn
, pchan
->scid
))
7267 l2cap_chan_lock(pchan
);
7268 chan
= pchan
->ops
->new_connection(pchan
);
7270 bacpy(&chan
->src
, &hcon
->src
);
7271 bacpy(&chan
->dst
, &hcon
->dst
);
7272 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7273 chan
->dst_type
= dst_type
;
7275 __l2cap_chan_add(conn
, chan
);
7278 l2cap_chan_unlock(pchan
);
7280 next
= l2cap_global_fixed_chan(pchan
, &hdev
->bdaddr
,
7282 l2cap_chan_put(pchan
);
7286 l2cap_conn_ready(conn
);
7289 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7291 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7293 BT_DBG("hcon %p", hcon
);
7296 return HCI_ERROR_REMOTE_USER_TERM
;
7297 return conn
->disc_reason
;
7300 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7302 BT_DBG("hcon %p reason %d", hcon
, reason
);
7304 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7307 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7309 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7312 if (encrypt
== 0x00) {
7313 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7314 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7315 } else if (chan
->sec_level
== BT_SECURITY_HIGH
||
7316 chan
->sec_level
== BT_SECURITY_FIPS
)
7317 l2cap_chan_close(chan
, ECONNREFUSED
);
7319 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7320 __clear_chan_timer(chan
);
7324 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7326 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7327 struct l2cap_chan
*chan
;
7332 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7334 mutex_lock(&conn
->chan_lock
);
7336 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7337 l2cap_chan_lock(chan
);
7339 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7340 state_to_string(chan
->state
));
7342 if (chan
->scid
== L2CAP_CID_A2MP
) {
7343 l2cap_chan_unlock(chan
);
7347 if (!status
&& encrypt
)
7348 chan
->sec_level
= hcon
->sec_level
;
7350 if (!__l2cap_no_conn_pending(chan
)) {
7351 l2cap_chan_unlock(chan
);
7355 if (!status
&& (chan
->state
== BT_CONNECTED
||
7356 chan
->state
== BT_CONFIG
)) {
7357 chan
->ops
->resume(chan
);
7358 l2cap_check_encryption(chan
, encrypt
);
7359 l2cap_chan_unlock(chan
);
7363 if (chan
->state
== BT_CONNECT
) {
7365 l2cap_start_connection(chan
);
7367 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7368 } else if (chan
->state
== BT_CONNECT2
&&
7369 chan
->mode
!= L2CAP_MODE_LE_FLOWCTL
) {
7370 struct l2cap_conn_rsp rsp
;
7374 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7375 res
= L2CAP_CR_PEND
;
7376 stat
= L2CAP_CS_AUTHOR_PEND
;
7377 chan
->ops
->defer(chan
);
7379 l2cap_state_change(chan
, BT_CONFIG
);
7380 res
= L2CAP_CR_SUCCESS
;
7381 stat
= L2CAP_CS_NO_INFO
;
7384 l2cap_state_change(chan
, BT_DISCONN
);
7385 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7386 res
= L2CAP_CR_SEC_BLOCK
;
7387 stat
= L2CAP_CS_NO_INFO
;
7390 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7391 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7392 rsp
.result
= cpu_to_le16(res
);
7393 rsp
.status
= cpu_to_le16(stat
);
7394 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7397 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7398 res
== L2CAP_CR_SUCCESS
) {
7400 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7401 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7403 l2cap_build_conf_req(chan
, buf
),
7405 chan
->num_conf_req
++;
7409 l2cap_chan_unlock(chan
);
7412 mutex_unlock(&conn
->chan_lock
);
7417 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7419 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7420 struct l2cap_hdr
*hdr
;
7423 /* For AMP controller do not create l2cap conn */
7424 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
7428 conn
= l2cap_conn_add(hcon
);
7433 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7437 case ACL_START_NO_FLUSH
:
7440 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7441 kfree_skb(conn
->rx_skb
);
7442 conn
->rx_skb
= NULL
;
7444 l2cap_conn_unreliable(conn
, ECOMM
);
7447 /* Start fragment always begin with Basic L2CAP header */
7448 if (skb
->len
< L2CAP_HDR_SIZE
) {
7449 BT_ERR("Frame is too short (len %d)", skb
->len
);
7450 l2cap_conn_unreliable(conn
, ECOMM
);
7454 hdr
= (struct l2cap_hdr
*) skb
->data
;
7455 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7457 if (len
== skb
->len
) {
7458 /* Complete frame received */
7459 l2cap_recv_frame(conn
, skb
);
7463 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7465 if (skb
->len
> len
) {
7466 BT_ERR("Frame is too long (len %d, expected len %d)",
7468 l2cap_conn_unreliable(conn
, ECOMM
);
7472 /* Allocate skb for the complete frame (with header) */
7473 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7477 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7479 conn
->rx_len
= len
- skb
->len
;
7483 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7485 if (!conn
->rx_len
) {
7486 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7487 l2cap_conn_unreliable(conn
, ECOMM
);
7491 if (skb
->len
> conn
->rx_len
) {
7492 BT_ERR("Fragment is too long (len %d, expected %d)",
7493 skb
->len
, conn
->rx_len
);
7494 kfree_skb(conn
->rx_skb
);
7495 conn
->rx_skb
= NULL
;
7497 l2cap_conn_unreliable(conn
, ECOMM
);
7501 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7503 conn
->rx_len
-= skb
->len
;
7505 if (!conn
->rx_len
) {
7506 /* Complete frame received. l2cap_recv_frame
7507 * takes ownership of the skb so set the global
7508 * rx_skb pointer to NULL first.
7510 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7511 conn
->rx_skb
= NULL
;
7512 l2cap_recv_frame(conn
, rx_skb
);
7522 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7524 struct l2cap_chan
*c
;
7526 read_lock(&chan_list_lock
);
7528 list_for_each_entry(c
, &chan_list
, global_l
) {
7529 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7531 c
->state
, __le16_to_cpu(c
->psm
),
7532 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7533 c
->sec_level
, c
->mode
);
7536 read_unlock(&chan_list_lock
);
7541 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7543 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7546 static const struct file_operations l2cap_debugfs_fops
= {
7547 .open
= l2cap_debugfs_open
,
7549 .llseek
= seq_lseek
,
7550 .release
= single_release
,
7553 static struct dentry
*l2cap_debugfs
;
7555 int __init
l2cap_init(void)
7559 err
= l2cap_init_sockets();
7563 if (IS_ERR_OR_NULL(bt_debugfs
))
7566 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7567 NULL
, &l2cap_debugfs_fops
);
7569 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs
,
7571 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs
,
7577 void l2cap_exit(void)
7579 debugfs_remove(l2cap_debugfs
);
7580 l2cap_cleanup_sockets();
/* Module parameter: lets ERTM (Enhanced Retransmission Mode) be disabled
 * at load time or via sysfs (mode 0644).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");