/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#define LE_FLOWCTL_MAX_CREDITS 65535

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type == LE_LINK) {
		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}

static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocations.
 */

static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
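
/* Illustrative walk-through (not part of the original file): behaviour of the
 * array-backed list above for a seq_list initialized with size 8 (so mask == 7),
 * assuming the L2CAP_SEQ_LIST_CLEAR/L2CAP_SEQ_LIST_TAIL sentinels described in
 * the comment before l2cap_seq_list_init():
 *
 *	l2cap_seq_list_append(&sl, 5);    head = 5, tail = 5, list[5] = TAIL
 *	l2cap_seq_list_append(&sl, 9);    list[5] = 9, tail = 9, list[9 & 7] = TAIL
 *	l2cap_seq_list_contains(&sl, 9);  true, since list[1] != CLEAR
 *	l2cap_seq_list_pop(&sl);          returns 5; head = 9, list[5] = CLEAR
 *	l2cap_seq_list_pop(&sl);          returns 9; head = tail = CLEAR (empty)
 *
 * Append, pop and the membership test each touch a fixed number of array
 * slots, which is why no per-node allocation is needed.
 */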
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}

void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);

static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu_last_frag = NULL;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}

void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
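
/* Illustrative note (not in the original file): with the wrap-around above,
 * successive calls hand out tx_ident values 1, 2, ..., 127, 128, 1, 2, ...
 * so identifier 0 is never used and the 129-254 range stays free for
 * userspace tools such as l2ping.
 */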
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
static bool __chan_is_moving(struct l2cap_chan *chan)
{
	return chan->move_state != L2CAP_MOVE_STABLE &&
	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}

static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
, struct l2cap_ctrl
*control
)
906 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
907 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
909 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
912 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
913 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
920 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
921 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
928 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
930 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
931 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
933 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
936 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
937 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
944 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
945 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
952 static inline void __unpack_control(struct l2cap_chan
*chan
,
955 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
956 __unpack_extended_control(get_unaligned_le32(skb
->data
),
958 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
960 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
962 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
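
/* Illustrative sketch (not part of the original file): packing an RR S-frame
 * into the enhanced 16-bit control field, assuming the usual ERTM layout
 * behind the L2CAP_CTRL_* shifts (frame type in bit 0, supervisory function
 * in bits 2-3, poll in bit 4, ReqSeq starting at bit 8):
 *
 *	control.sframe = 1, control.super = L2CAP_SUPER_RR (0),
 *	control.poll = 1, control.final = 0, control.reqseq = 5
 *
 *	__pack_enhanced_control(&control) == 0x0511
 *	  (reqseq 5 << 8 = 0x0500, poll 1 << 4 = 0x0010, frame-type bit 0x0001)
 */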
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}

static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}

static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return 1;

	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}

static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}

static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;

	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}

static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}

static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
*chan
)
1334 struct l2cap_conn
*conn
= chan
->conn
;
1336 if (conn
->hcon
->type
== LE_LINK
) {
1337 l2cap_le_start(chan
);
1341 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)) {
1342 l2cap_request_info(conn
);
1346 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1349 if (l2cap_chan_check_security(chan
, true) &&
1350 __l2cap_no_conn_pending(chan
))
1351 l2cap_start_connection(chan
);
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}

static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
/*
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
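
/* Illustrative sketch (not part of the original file): a minimal external
 * user of this interface. The probe/remove signatures follow struct
 * l2cap_user; my_probe(), my_remove() and my_user are hypothetical names.
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// conn->hcon and conn->hchan are usable from here on
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// last point at which conn->hcon / conn->hchan may be touched
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.list   = LIST_HEAD_INIT(my_user.list),
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	// caller must hold its own reference to conn (l2cap_conn_get())
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 */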
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too.
	 */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);

void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}

struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);

void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}

static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		 * read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);
		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}

static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
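
/* Worked example (not in the original file): with ack_win = 63 the shift/add
 * sequence above gives threshold = (63 + (63 << 1)) >> 2 = 189 >> 2 = 47,
 * i.e. the integer value of 3/4 of the ack window, computed with a shift and
 * an add instead of a multiply and a divide.
 */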
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len  -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2156 struct msghdr
*msg
, size_t len
)
2158 struct l2cap_conn
*conn
= chan
->conn
;
2159 struct sk_buff
*skb
;
2160 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2161 struct l2cap_hdr
*lh
;
2163 BT_DBG("chan %p psm 0x%2.2x len %zu", chan
,
2164 __le16_to_cpu(chan
->psm
), len
);
2166 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2168 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2169 msg
->msg_flags
& MSG_DONTWAIT
);
2173 /* Create L2CAP header */
2174 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2175 lh
->cid
= cpu_to_le16(chan
->dcid
);
2176 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2177 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2179 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2180 if (unlikely(err
< 0)) {
2182 return ERR_PTR(err
);
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2218 struct msghdr
*msg
, size_t len
,
2221 struct l2cap_conn
*conn
= chan
->conn
;
2222 struct sk_buff
*skb
;
2223 int err
, count
, hlen
;
2224 struct l2cap_hdr
*lh
;
2226 BT_DBG("chan %p len %zu", chan
, len
);
2229 return ERR_PTR(-ENOTCONN
);
2231 hlen
= __ertm_hdr_size(chan
);
2234 hlen
+= L2CAP_SDULEN_SIZE
;
2236 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2237 hlen
+= L2CAP_FCS_SIZE
;
2239 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2241 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2242 msg
->msg_flags
& MSG_DONTWAIT
);
2246 /* Create L2CAP header */
2247 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2248 lh
->cid
= cpu_to_le16(chan
->dcid
);
2249 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2251 /* Control header is populated later */
2252 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2253 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2255 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2258 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2260 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2261 if (unlikely(err
< 0)) {
2263 return ERR_PTR(err
);
2266 bt_cb(skb
)->l2cap
.fcs
= chan
->fcs
;
2267 bt_cb(skb
)->l2cap
.retries
= 0;
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
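
/* Worked example (not in the original file), assuming a BR/EDR ACL MTU of
 * 1024, CRC16 FCS enabled, the enhanced (non-extended) ERTM header and taking
 * L2CAP_BREDR_MAX_PAYLOAD as 1019 bytes:
 *
 *	pdu_len = 1024                       HCI MTU
 *	pdu_len = min(1024, 1019) = 1019     BR/EDR payload cap
 *	pdu_len = 1019 - 2 = 1017            FCS
 *	pdu_len = 1017 - 6 = 1011            basic header + enhanced control
 *	pdu_len = min(1011, remote MPS)
 *
 * A 3000 byte SDU would then be segmented into SAR_START/CONTINUE/.../END
 * I-frames carrying at most pdu_len payload bytes each.
 */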
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
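/* Illustrative example, not part of the original driver: with LE flow
 * control, l2cap_segment_le_sdu() gives the first PDU a 2-byte SDU length
 * header plus (remote_mps - 2) bytes of data, and every following PDU up to
 * remote_mps bytes; each PDU then costs one credit in the send loop above.
 * The helper name and the sample numbers (remote_mps = 100, a 250-byte SDU
 * => 98 + 100 + 52 data bytes = 3 PDUs, i.e. 3 credits) are assumptions
 * used only for this sketch.
 */
#if 0
static unsigned int example_le_pdu_count(size_t sdu_len, u16 remote_mps)
{
	size_t first = remote_mps - L2CAP_SDULEN_SIZE;	/* room after SDU len */

	if (sdu_len <= first)
		return 1;

	/* remaining bytes go out in full-MPS PDUs, rounding up */
	return 1 + DIV_ROUND_UP(sdu_len - first, remote_mps);
}
#endif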
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* Ignore event */
		break;
	}
}
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}

static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
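/* Illustrative sketch, not part of the original driver: configuration
 * options are packed as { u8 type; u8 len; u8 val[len]; } records, which is
 * why l2cap_add_conf_opt() advances the write pointer by
 * L2CAP_CONF_OPT_SIZE + len.  The helper name and buffer handling below are
 * assumptions used only for this example.
 */
#if 0
static int example_build_mtu_option(u8 *buf, u16 mtu)
{
	void *ptr = buf;

	/* 2-byte little-endian MTU value, preceded by type and length */
	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, mtu);

	return ptr - (void *) buf;	/* bytes written: 2 + 2 */
}
#endif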
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}

static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
}

static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
}
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
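/* Illustrative sketch, not part of the original driver: the timeout
 * arithmetic above as a standalone helper, assuming amp_be_flush_to is
 * expressed in microseconds (as the division by 1000 implies).  With that
 * assumption, a best-effort flush timeout of 10,000 us gives 10 ms and then
 * 3 * 10 + 500 = 530 ms; the Class 1 value of 0xffffffff overflows the
 * 16-bit field and is clamped to 0xffff (65.535 seconds).
 */
#if 0
static u16 example_ertm_timeout_ms(u64 be_flush_to_us)
{
	u64 ertm_to = DIV_ROUND_UP_ULL(be_flush_to_us, 1000);	/* us -> ms */

	ertm_to = 3 * ertm_to + 500;	/* recommended class 2 formula */

	return min_t(u64, ertm_to, 0xffff);	/* 16-bit field, 65.535 s max */
}
#endif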
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
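/* Illustrative sketch, not part of the original driver: this is the calling
 * pattern used by the real callers elsewhere in this file -- build the
 * request into a scratch buffer, then hand its length and the buffer to
 * l2cap_send_cmd().  The helper name and the 128-byte buffer size mirror the
 * local buffers used by those callers and are otherwise assumptions of this
 * example.
 */
#if 0
static void example_send_conf_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
#endif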
3294 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
3296 struct l2cap_conf_rsp
*rsp
= data
;
3297 void *ptr
= rsp
->data
;
3298 void *req
= chan
->conf_req
;
3299 int len
= chan
->conf_len
;
3300 int type
, hint
, olen
;
3302 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3303 struct l2cap_conf_efs efs
;
3305 u16 mtu
= L2CAP_DEFAULT_MTU
;
3306 u16 result
= L2CAP_CONF_SUCCESS
;
3309 BT_DBG("chan %p", chan
);
3311 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3312 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3314 hint
= type
& L2CAP_CONF_HINT
;
3315 type
&= L2CAP_CONF_MASK
;
3318 case L2CAP_CONF_MTU
:
3322 case L2CAP_CONF_FLUSH_TO
:
3323 chan
->flush_to
= val
;
3326 case L2CAP_CONF_QOS
:
3329 case L2CAP_CONF_RFC
:
3330 if (olen
== sizeof(rfc
))
3331 memcpy(&rfc
, (void *) val
, olen
);
3334 case L2CAP_CONF_FCS
:
3335 if (val
== L2CAP_FCS_NONE
)
3336 set_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
);
3339 case L2CAP_CONF_EFS
:
3341 if (olen
== sizeof(efs
))
3342 memcpy(&efs
, (void *) val
, olen
);
3345 case L2CAP_CONF_EWS
:
3346 if (!(chan
->conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
3347 return -ECONNREFUSED
;
3349 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3350 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3351 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3352 chan
->remote_tx_win
= val
;
3359 result
= L2CAP_CONF_UNKNOWN
;
3360 *((u8
*) ptr
++) = type
;
3365 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3368 switch (chan
->mode
) {
3369 case L2CAP_MODE_STREAMING
:
3370 case L2CAP_MODE_ERTM
:
3371 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3372 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3373 chan
->conn
->feat_mask
);
3378 if (__l2cap_efs_supported(chan
->conn
))
3379 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3381 return -ECONNREFUSED
;
3384 if (chan
->mode
!= rfc
.mode
)
3385 return -ECONNREFUSED
;
3391 if (chan
->mode
!= rfc
.mode
) {
3392 result
= L2CAP_CONF_UNACCEPT
;
3393 rfc
.mode
= chan
->mode
;
3395 if (chan
->num_conf_rsp
== 1)
3396 return -ECONNREFUSED
;
3398 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3399 (unsigned long) &rfc
);
3402 if (result
== L2CAP_CONF_SUCCESS
) {
3403 /* Configure output options and let the other side know
3404 * which ones we don't like. */
3406 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3407 result
= L2CAP_CONF_UNACCEPT
;
3410 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3412 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3415 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3416 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3417 efs
.stype
!= chan
->local_stype
) {
3419 result
= L2CAP_CONF_UNACCEPT
;
3421 if (chan
->num_conf_req
>= 1)
3422 return -ECONNREFUSED
;
3424 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3426 (unsigned long) &efs
);
3428 /* Send PENDING Conf Rsp */
3429 result
= L2CAP_CONF_PENDING
;
3430 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3435 case L2CAP_MODE_BASIC
:
3436 chan
->fcs
= L2CAP_FCS_NONE
;
3437 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3440 case L2CAP_MODE_ERTM
:
3441 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3442 chan
->remote_tx_win
= rfc
.txwin_size
;
3444 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3446 chan
->remote_max_tx
= rfc
.max_transmit
;
3448 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3449 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3450 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3451 rfc
.max_pdu_size
= cpu_to_le16(size
);
3452 chan
->remote_mps
= size
;
3454 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3456 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3458 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3459 sizeof(rfc
), (unsigned long) &rfc
);
3461 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3462 chan
->remote_id
= efs
.id
;
3463 chan
->remote_stype
= efs
.stype
;
3464 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3465 chan
->remote_flush_to
=
3466 le32_to_cpu(efs
.flush_to
);
3467 chan
->remote_acc_lat
=
3468 le32_to_cpu(efs
.acc_lat
);
3469 chan
->remote_sdu_itime
=
3470 le32_to_cpu(efs
.sdu_itime
);
3471 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3473 (unsigned long) &efs
);
3477 case L2CAP_MODE_STREAMING
:
3478 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3479 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3480 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3481 rfc
.max_pdu_size
= cpu_to_le16(size
);
3482 chan
->remote_mps
= size
;
3484 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3486 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3487 (unsigned long) &rfc
);
3492 result
= L2CAP_CONF_UNACCEPT
;
3494 memset(&rfc
, 0, sizeof(rfc
));
3495 rfc
.mode
= chan
->mode
;
3498 if (result
== L2CAP_CONF_SUCCESS
)
3499 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3501 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3502 rsp
->result
= cpu_to_le16(result
);
3503 rsp
->flags
= cpu_to_le16(0);
3508 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3509 void *data
, u16
*result
)
3511 struct l2cap_conf_req
*req
= data
;
3512 void *ptr
= req
->data
;
3515 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3516 struct l2cap_conf_efs efs
;
3518 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3520 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3521 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3524 case L2CAP_CONF_MTU
:
3525 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3526 *result
= L2CAP_CONF_UNACCEPT
;
3527 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3530 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3533 case L2CAP_CONF_FLUSH_TO
:
3534 chan
->flush_to
= val
;
3535 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3539 case L2CAP_CONF_RFC
:
3540 if (olen
== sizeof(rfc
))
3541 memcpy(&rfc
, (void *)val
, olen
);
3543 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3544 rfc
.mode
!= chan
->mode
)
3545 return -ECONNREFUSED
;
3549 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3550 sizeof(rfc
), (unsigned long) &rfc
);
3553 case L2CAP_CONF_EWS
:
3554 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3555 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3559 case L2CAP_CONF_EFS
:
3560 if (olen
== sizeof(efs
))
3561 memcpy(&efs
, (void *)val
, olen
);
3563 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3564 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3565 efs
.stype
!= chan
->local_stype
)
3566 return -ECONNREFUSED
;
3568 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3569 (unsigned long) &efs
);
3572 case L2CAP_CONF_FCS
:
3573 if (*result
== L2CAP_CONF_PENDING
)
3574 if (val
== L2CAP_FCS_NONE
)
3575 set_bit(CONF_RECV_NO_FCS
,
3581 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3582 return -ECONNREFUSED
;
3584 chan
->mode
= rfc
.mode
;
3586 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3588 case L2CAP_MODE_ERTM
:
3589 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3590 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3591 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3592 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3593 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3596 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3597 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3598 chan
->local_sdu_itime
=
3599 le32_to_cpu(efs
.sdu_itime
);
3600 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3601 chan
->local_flush_to
=
3602 le32_to_cpu(efs
.flush_to
);
3606 case L2CAP_MODE_STREAMING
:
3607 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3611 req
->dcid
= cpu_to_le16(chan
->dcid
);
3612 req
->flags
= cpu_to_le16(0);
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p", chan);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	}
}
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3754 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3755 struct l2cap_cmd_hdr
*cmd
,
3756 u8
*data
, u8 rsp_code
, u8 amp_id
)
3758 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3759 struct l2cap_conn_rsp rsp
;
3760 struct l2cap_chan
*chan
= NULL
, *pchan
;
3761 int result
, status
= L2CAP_CS_NO_INFO
;
3763 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3764 __le16 psm
= req
->psm
;
3766 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3768 /* Check if we have socket listening on psm */
3769 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3770 &conn
->hcon
->dst
, ACL_LINK
);
3772 result
= L2CAP_CR_BAD_PSM
;
3776 mutex_lock(&conn
->chan_lock
);
3777 l2cap_chan_lock(pchan
);
3779 /* Check if the ACL is secure enough (if not SDP) */
3780 if (psm
!= cpu_to_le16(L2CAP_PSM_SDP
) &&
3781 !hci_conn_check_link_mode(conn
->hcon
)) {
3782 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3783 result
= L2CAP_CR_SEC_BLOCK
;
3787 result
= L2CAP_CR_NO_MEM
;
3789 /* Check if we already have channel with that dcid */
3790 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3793 chan
= pchan
->ops
->new_connection(pchan
);
3797 /* For certain devices (ex: HID mouse), support for authentication,
3798 * pairing and bonding is optional. For such devices, inorder to avoid
3799 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3800 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3802 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3804 bacpy(&chan
->src
, &conn
->hcon
->src
);
3805 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3806 chan
->src_type
= bdaddr_src_type(conn
->hcon
);
3807 chan
->dst_type
= bdaddr_dst_type(conn
->hcon
);
3810 chan
->local_amp_id
= amp_id
;
3812 __l2cap_chan_add(conn
, chan
);
3816 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3818 chan
->ident
= cmd
->ident
;
3820 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3821 if (l2cap_chan_check_security(chan
, false)) {
3822 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3823 l2cap_state_change(chan
, BT_CONNECT2
);
3824 result
= L2CAP_CR_PEND
;
3825 status
= L2CAP_CS_AUTHOR_PEND
;
3826 chan
->ops
->defer(chan
);
3828 /* Force pending result for AMP controllers.
3829 * The connection will succeed after the
3830 * physical link is up.
3832 if (amp_id
== AMP_ID_BREDR
) {
3833 l2cap_state_change(chan
, BT_CONFIG
);
3834 result
= L2CAP_CR_SUCCESS
;
3836 l2cap_state_change(chan
, BT_CONNECT2
);
3837 result
= L2CAP_CR_PEND
;
3839 status
= L2CAP_CS_NO_INFO
;
3842 l2cap_state_change(chan
, BT_CONNECT2
);
3843 result
= L2CAP_CR_PEND
;
3844 status
= L2CAP_CS_AUTHEN_PEND
;
3847 l2cap_state_change(chan
, BT_CONNECT2
);
3848 result
= L2CAP_CR_PEND
;
3849 status
= L2CAP_CS_NO_INFO
;
3853 l2cap_chan_unlock(pchan
);
3854 mutex_unlock(&conn
->chan_lock
);
3855 l2cap_chan_put(pchan
);
3858 rsp
.scid
= cpu_to_le16(scid
);
3859 rsp
.dcid
= cpu_to_le16(dcid
);
3860 rsp
.result
= cpu_to_le16(result
);
3861 rsp
.status
= cpu_to_le16(status
);
3862 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3864 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3865 struct l2cap_info_req info
;
3866 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3868 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3869 conn
->info_ident
= l2cap_get_ident(conn
);
3871 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3873 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3874 sizeof(info
), &info
);
3877 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3878 result
== L2CAP_CR_SUCCESS
) {
3880 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3881 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3882 l2cap_build_conf_req(chan
, buf
), buf
);
3883 chan
->num_conf_req
++;
3889 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3890 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3892 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3893 struct hci_conn
*hcon
= conn
->hcon
;
3895 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3899 if (hci_dev_test_flag(hdev
, HCI_MGMT
) &&
3900 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3901 mgmt_device_connected(hdev
, hcon
, 0, NULL
, 0);
3902 hci_dev_unlock(hdev
);
3904 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3908 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3909 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3912 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3913 u16 scid
, dcid
, result
, status
;
3914 struct l2cap_chan
*chan
;
3918 if (cmd_len
< sizeof(*rsp
))
3921 scid
= __le16_to_cpu(rsp
->scid
);
3922 dcid
= __le16_to_cpu(rsp
->dcid
);
3923 result
= __le16_to_cpu(rsp
->result
);
3924 status
= __le16_to_cpu(rsp
->status
);
3926 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3927 dcid
, scid
, result
, status
);
3929 mutex_lock(&conn
->chan_lock
);
3932 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3938 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3947 l2cap_chan_lock(chan
);
3950 case L2CAP_CR_SUCCESS
:
3951 l2cap_state_change(chan
, BT_CONFIG
);
3954 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3956 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3959 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3960 l2cap_build_conf_req(chan
, req
), req
);
3961 chan
->num_conf_req
++;
3965 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3969 l2cap_chan_del(chan
, ECONNREFUSED
);
3973 l2cap_chan_unlock(chan
);
3976 mutex_unlock(&conn
->chan_lock
);
static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	rej.scid = __cpu_to_le16(scid);
	rej.dcid = __cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4020 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4021 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4024 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4027 struct l2cap_chan
*chan
;
4030 if (cmd_len
< sizeof(*req
))
4033 dcid
= __le16_to_cpu(req
->dcid
);
4034 flags
= __le16_to_cpu(req
->flags
);
4036 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4038 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4040 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4044 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4045 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4050 /* Reject if config buffer is too small. */
4051 len
= cmd_len
- sizeof(*req
);
4052 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4053 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4054 l2cap_build_conf_rsp(chan
, rsp
,
4055 L2CAP_CONF_REJECT
, flags
), rsp
);
4060 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4061 chan
->conf_len
+= len
;
4063 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4064 /* Incomplete config. Send empty response. */
4065 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4066 l2cap_build_conf_rsp(chan
, rsp
,
4067 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4071 /* Complete config. */
4072 len
= l2cap_parse_conf_req(chan
, rsp
);
4074 l2cap_send_disconn_req(chan
, ECONNRESET
);
4078 chan
->ident
= cmd
->ident
;
4079 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4080 chan
->num_conf_rsp
++;
4082 /* Reset config buffer. */
4085 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4088 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4089 set_default_fcs(chan
);
4091 if (chan
->mode
== L2CAP_MODE_ERTM
||
4092 chan
->mode
== L2CAP_MODE_STREAMING
)
4093 err
= l2cap_ertm_init(chan
);
4096 l2cap_send_disconn_req(chan
, -err
);
4098 l2cap_chan_ready(chan
);
4103 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4105 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4106 l2cap_build_conf_req(chan
, buf
), buf
);
4107 chan
->num_conf_req
++;
4110 /* Got Conf Rsp PENDING from remote side and assume we sent
4111 Conf Rsp PENDING in the code above */
4112 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4113 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4115 /* check compatibility */
4117 /* Send rsp for BR/EDR channel */
4119 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4121 chan
->ident
= cmd
->ident
;
4125 l2cap_chan_unlock(chan
);
4129 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4130 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4133 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4134 u16 scid
, flags
, result
;
4135 struct l2cap_chan
*chan
;
4136 int len
= cmd_len
- sizeof(*rsp
);
4139 if (cmd_len
< sizeof(*rsp
))
4142 scid
= __le16_to_cpu(rsp
->scid
);
4143 flags
= __le16_to_cpu(rsp
->flags
);
4144 result
= __le16_to_cpu(rsp
->result
);
4146 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4149 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4154 case L2CAP_CONF_SUCCESS
:
4155 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4156 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4159 case L2CAP_CONF_PENDING
:
4160 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4162 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4165 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4168 l2cap_send_disconn_req(chan
, ECONNRESET
);
4172 if (!chan
->hs_hcon
) {
4173 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4176 if (l2cap_check_efs(chan
)) {
4177 amp_create_logical_link(chan
);
4178 chan
->ident
= cmd
->ident
;
4184 case L2CAP_CONF_UNACCEPT
:
4185 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4188 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4189 l2cap_send_disconn_req(chan
, ECONNRESET
);
4193 /* throw out any old stored conf requests */
4194 result
= L2CAP_CONF_SUCCESS
;
4195 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4198 l2cap_send_disconn_req(chan
, ECONNRESET
);
4202 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4203 L2CAP_CONF_REQ
, len
, req
);
4204 chan
->num_conf_req
++;
4205 if (result
!= L2CAP_CONF_SUCCESS
)
4211 l2cap_chan_set_err(chan
, ECONNRESET
);
4213 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4214 l2cap_send_disconn_req(chan
, ECONNRESET
);
4218 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4221 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4223 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4224 set_default_fcs(chan
);
4226 if (chan
->mode
== L2CAP_MODE_ERTM
||
4227 chan
->mode
== L2CAP_MODE_STREAMING
)
4228 err
= l2cap_ertm_init(chan
);
4231 l2cap_send_disconn_req(chan
, -err
);
4233 l2cap_chan_ready(chan
);
4237 l2cap_chan_unlock(chan
);
4241 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4242 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4245 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4246 struct l2cap_disconn_rsp rsp
;
4248 struct l2cap_chan
*chan
;
4250 if (cmd_len
!= sizeof(*req
))
4253 scid
= __le16_to_cpu(req
->scid
);
4254 dcid
= __le16_to_cpu(req
->dcid
);
4256 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4258 mutex_lock(&conn
->chan_lock
);
4260 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4262 mutex_unlock(&conn
->chan_lock
);
4263 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4267 l2cap_chan_lock(chan
);
4269 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4270 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4271 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4273 chan
->ops
->set_shutdown(chan
);
4275 l2cap_chan_hold(chan
);
4276 l2cap_chan_del(chan
, ECONNRESET
);
4278 l2cap_chan_unlock(chan
);
4280 chan
->ops
->close(chan
);
4281 l2cap_chan_put(chan
);
4283 mutex_unlock(&conn
->chan_lock
);
4288 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4289 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4292 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4294 struct l2cap_chan
*chan
;
4296 if (cmd_len
!= sizeof(*rsp
))
4299 scid
= __le16_to_cpu(rsp
->scid
);
4300 dcid
= __le16_to_cpu(rsp
->dcid
);
4302 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4304 mutex_lock(&conn
->chan_lock
);
4306 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4308 mutex_unlock(&conn
->chan_lock
);
4312 l2cap_chan_lock(chan
);
4314 l2cap_chan_hold(chan
);
4315 l2cap_chan_del(chan
, 0);
4317 l2cap_chan_unlock(chan
);
4319 chan
->ops
->close(chan
);
4320 l2cap_chan_put(chan
);
4322 mutex_unlock(&conn
->chan_lock
);
4327 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4328 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4331 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4334 if (cmd_len
!= sizeof(*req
))
4337 type
= __le16_to_cpu(req
->type
);
4339 BT_DBG("type 0x%4.4x", type
);
4341 if (type
== L2CAP_IT_FEAT_MASK
) {
4343 u32 feat_mask
= l2cap_feat_mask
;
4344 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4345 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4346 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4348 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4350 if (conn
->local_fixed_chan
& L2CAP_FC_A2MP
)
4351 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4352 | L2CAP_FEAT_EXT_WINDOW
;
4354 put_unaligned_le32(feat_mask
, rsp
->data
);
4355 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4357 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4359 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4361 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4362 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4363 rsp
->data
[0] = conn
->local_fixed_chan
;
4364 memset(rsp
->data
+ 1, 0, 7);
4365 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4368 struct l2cap_info_rsp rsp
;
4369 rsp
.type
= cpu_to_le16(type
);
4370 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4371 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4378 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4379 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4382 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4385 if (cmd_len
< sizeof(*rsp
))
4388 type
= __le16_to_cpu(rsp
->type
);
4389 result
= __le16_to_cpu(rsp
->result
);
4391 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4393 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4394 if (cmd
->ident
!= conn
->info_ident
||
4395 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4398 cancel_delayed_work(&conn
->info_timer
);
4400 if (result
!= L2CAP_IR_SUCCESS
) {
4401 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4402 conn
->info_ident
= 0;
4404 l2cap_conn_start(conn
);
4410 case L2CAP_IT_FEAT_MASK
:
4411 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4413 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4414 struct l2cap_info_req req
;
4415 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4417 conn
->info_ident
= l2cap_get_ident(conn
);
4419 l2cap_send_cmd(conn
, conn
->info_ident
,
4420 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4422 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4423 conn
->info_ident
= 0;
4425 l2cap_conn_start(conn
);
4429 case L2CAP_IT_FIXED_CHAN
:
4430 conn
->remote_fixed_chan
= rsp
->data
[0];
4431 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4432 conn
->info_ident
= 0;
4434 l2cap_conn_start(conn
);
4441 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4442 struct l2cap_cmd_hdr
*cmd
,
4443 u16 cmd_len
, void *data
)
4445 struct l2cap_create_chan_req
*req
= data
;
4446 struct l2cap_create_chan_rsp rsp
;
4447 struct l2cap_chan
*chan
;
4448 struct hci_dev
*hdev
;
4451 if (cmd_len
!= sizeof(*req
))
4454 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
4457 psm
= le16_to_cpu(req
->psm
);
4458 scid
= le16_to_cpu(req
->scid
);
4460 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4462 /* For controller id 0 make BR/EDR connection */
4463 if (req
->amp_id
== AMP_ID_BREDR
) {
4464 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4469 /* Validate AMP controller id */
4470 hdev
= hci_dev_get(req
->amp_id
);
4474 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4479 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4482 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4483 struct hci_conn
*hs_hcon
;
4485 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4489 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4494 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4496 mgr
->bredr_chan
= chan
;
4497 chan
->hs_hcon
= hs_hcon
;
4498 chan
->fcs
= L2CAP_FCS_NONE
;
4499 conn
->mtu
= hdev
->block_mtu
;
4508 rsp
.scid
= cpu_to_le16(scid
);
4509 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4510 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4512 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
{
	struct l2cap_move_chan_req req;
	u8 ident;

	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);

	ident = l2cap_get_ident(chan->conn);
	chan->ident = ident;

	req.icid = cpu_to_le16(chan->scid);
	req.dest_amp_id = dest_amp_id;

	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
		       &req);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_rsp rsp;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	rsp.icid = cpu_to_le16(chan->dcid);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
		       sizeof(rsp), &rsp);
}
static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	chan->ident = l2cap_get_ident(chan->conn);

	cfm.icid = cpu_to_le16(chan->scid);
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
{
	struct l2cap_move_chan_cfm cfm;

	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);

	cfm.icid = cpu_to_le16(icid);
	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);
}
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
					 u16 icid)
{
	struct l2cap_move_chan_cfm_rsp rsp;

	BT_DBG("icid 0x%4.4x", icid);

	rsp.icid = cpu_to_le16(icid);
	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4599 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4601 /* Logical link setup failed */
4602 if (chan
->state
!= BT_CONNECTED
) {
4603 /* Create channel failure, disconnect */
4604 l2cap_send_disconn_req(chan
, ECONNRESET
);
4608 switch (chan
->move_role
) {
4609 case L2CAP_MOVE_ROLE_RESPONDER
:
4610 l2cap_move_done(chan
);
4611 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4613 case L2CAP_MOVE_ROLE_INITIATOR
:
4614 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4615 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4616 /* Remote has only sent pending or
4617 * success responses, clean up
4619 l2cap_move_done(chan
);
4622 /* Other amp move states imply that the move
4623 * has already aborted
4625 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4630 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4631 struct hci_chan
*hchan
)
4633 struct l2cap_conf_rsp rsp
;
4635 chan
->hs_hchan
= hchan
;
4636 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4638 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4640 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4643 set_default_fcs(chan
);
4645 err
= l2cap_ertm_init(chan
);
4647 l2cap_send_disconn_req(chan
, -err
);
4649 l2cap_chan_ready(chan
);
4653 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4654 struct hci_chan
*hchan
)
4656 chan
->hs_hcon
= hchan
->conn
;
4657 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4659 BT_DBG("move_state %d", chan
->move_state
);
4661 switch (chan
->move_state
) {
4662 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4663 /* Move confirm will be sent after a success
4664 * response is received
4666 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4668 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4669 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4670 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4671 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4672 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4673 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4674 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4675 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4676 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4680 /* Move was not in expected state, free the channel */
4681 __release_logical_link(chan
);
4683 chan
->move_state
= L2CAP_MOVE_STABLE
;
4687 /* Call with chan locked */
4688 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4691 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4694 l2cap_logical_fail(chan
);
4695 __release_logical_link(chan
);
4699 if (chan
->state
!= BT_CONNECTED
) {
4700 /* Ignore logical link if channel is on BR/EDR */
4701 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4702 l2cap_logical_finish_create(chan
, hchan
);
4704 l2cap_logical_finish_move(chan
, hchan
);
4708 void l2cap_move_start(struct l2cap_chan
*chan
)
4710 BT_DBG("chan %p", chan
);
4712 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4713 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4715 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4716 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4717 /* Placeholder - start physical link setup */
4719 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4720 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4722 l2cap_move_setup(chan
);
4723 l2cap_send_move_chan_req(chan
, 0);
4727 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4728 u8 local_amp_id
, u8 remote_amp_id
)
4730 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4731 local_amp_id
, remote_amp_id
);
4733 chan
->fcs
= L2CAP_FCS_NONE
;
4735 /* Outgoing channel on AMP */
4736 if (chan
->state
== BT_CONNECT
) {
4737 if (result
== L2CAP_CR_SUCCESS
) {
4738 chan
->local_amp_id
= local_amp_id
;
4739 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4741 /* Revert to BR/EDR connect */
4742 l2cap_send_conn_req(chan
);
4748 /* Incoming channel on AMP */
4749 if (__l2cap_no_conn_pending(chan
)) {
4750 struct l2cap_conn_rsp rsp
;
4752 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4753 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4755 if (result
== L2CAP_CR_SUCCESS
) {
4756 /* Send successful response */
4757 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4758 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4760 /* Send negative response */
4761 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4762 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4765 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4768 if (result
== L2CAP_CR_SUCCESS
) {
4769 l2cap_state_change(chan
, BT_CONFIG
);
4770 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4771 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4773 l2cap_build_conf_req(chan
, buf
), buf
);
4774 chan
->num_conf_req
++;
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4789 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4791 struct hci_chan
*hchan
= NULL
;
4793 /* Placeholder - get hci_chan for logical link */
4796 if (hchan
->state
== BT_CONNECTED
) {
4797 /* Logical link is ready to go */
4798 chan
->hs_hcon
= hchan
->conn
;
4799 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4800 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4801 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4803 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4805 /* Wait for logical link to be ready */
4806 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4809 /* Logical link not available */
4810 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
{
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		u8 rsp_result;

		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
		else
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);
	}

	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
}
/* Invoke with locked chan */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4867 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4868 struct l2cap_cmd_hdr
*cmd
,
4869 u16 cmd_len
, void *data
)
4871 struct l2cap_move_chan_req
*req
= data
;
4872 struct l2cap_move_chan_rsp rsp
;
4873 struct l2cap_chan
*chan
;
4875 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4877 if (cmd_len
!= sizeof(*req
))
4880 icid
= le16_to_cpu(req
->icid
);
4882 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4884 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
4887 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4889 rsp
.icid
= cpu_to_le16(icid
);
4890 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4891 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4896 chan
->ident
= cmd
->ident
;
4898 if (chan
->scid
< L2CAP_CID_DYN_START
||
4899 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4900 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4901 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4902 result
= L2CAP_MR_NOT_ALLOWED
;
4903 goto send_move_response
;
4906 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4907 result
= L2CAP_MR_SAME_ID
;
4908 goto send_move_response
;
4911 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4912 struct hci_dev
*hdev
;
4913 hdev
= hci_dev_get(req
->dest_amp_id
);
4914 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4915 !test_bit(HCI_UP
, &hdev
->flags
)) {
4919 result
= L2CAP_MR_BAD_ID
;
4920 goto send_move_response
;
4925 /* Detect a move collision. Only send a collision response
4926 * if this side has "lost", otherwise proceed with the move.
4927 * The winner has the larger bd_addr.
4929 if ((__chan_is_moving(chan
) ||
4930 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4931 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4932 result
= L2CAP_MR_COLLISION
;
4933 goto send_move_response
;
4936 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4937 l2cap_move_setup(chan
);
4938 chan
->move_id
= req
->dest_amp_id
;
4941 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4942 /* Moving to BR/EDR */
4943 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4944 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4945 result
= L2CAP_MR_PEND
;
4947 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4948 result
= L2CAP_MR_SUCCESS
;
4951 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4952 /* Placeholder - uncomment when amp functions are available */
4953 /*amp_accept_physical(chan, req->dest_amp_id);*/
4954 result
= L2CAP_MR_PEND
;
4958 l2cap_send_move_chan_rsp(chan
, result
);
4960 l2cap_chan_unlock(chan
);
4965 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4967 struct l2cap_chan
*chan
;
4968 struct hci_chan
*hchan
= NULL
;
4970 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4972 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4976 __clear_chan_timer(chan
);
4977 if (result
== L2CAP_MR_PEND
)
4978 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4980 switch (chan
->move_state
) {
4981 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4982 /* Move confirm will be sent when logical link
4985 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4987 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4988 if (result
== L2CAP_MR_PEND
) {
4990 } else if (test_bit(CONN_LOCAL_BUSY
,
4991 &chan
->conn_state
)) {
4992 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4994 /* Logical link is up or moving to BR/EDR,
4997 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4998 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5001 case L2CAP_MOVE_WAIT_RSP
:
5003 if (result
== L2CAP_MR_SUCCESS
) {
5004 /* Remote is ready, send confirm immediately
5005 * after logical link is ready
5007 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5009 /* Both logical link and move success
5010 * are required to confirm
5012 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5015 /* Placeholder - get hci_chan for logical link */
5017 /* Logical link not available */
5018 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5022 /* If the logical link is not yet connected, do not
5023 * send confirmation.
5025 if (hchan
->state
!= BT_CONNECTED
)
5028 /* Logical link is already ready to go */
5030 chan
->hs_hcon
= hchan
->conn
;
5031 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5033 if (result
== L2CAP_MR_SUCCESS
) {
5034 /* Can confirm now */
5035 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5037 /* Now only need move success
5040 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5043 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5046 /* Any other amp move state means the move failed. */
5047 chan
->move_id
= chan
->local_amp_id
;
5048 l2cap_move_done(chan
);
5049 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5052 l2cap_chan_unlock(chan
);
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
		l2cap_move_continue(conn, icid, result);
	else
		l2cap_move_fail(conn, cmd->ident, icid, result);

	return 0;
}
5107 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5108 struct l2cap_cmd_hdr
*cmd
,
5109 u16 cmd_len
, void *data
)
5111 struct l2cap_move_chan_cfm
*cfm
= data
;
5112 struct l2cap_chan
*chan
;
5115 if (cmd_len
!= sizeof(*cfm
))
5118 icid
= le16_to_cpu(cfm
->icid
);
5119 result
= le16_to_cpu(cfm
->result
);
5121 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5123 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5125 /* Spec requires a response even if the icid was not found */
5126 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5130 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5131 if (result
== L2CAP_MC_CONFIRMED
) {
5132 chan
->local_amp_id
= chan
->move_id
;
5133 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5134 __release_logical_link(chan
);
5136 chan
->move_id
= chan
->local_amp_id
;
5139 l2cap_move_done(chan
);
5142 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5144 l2cap_chan_unlock(chan
);
5149 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5150 struct l2cap_cmd_hdr
*cmd
,
5151 u16 cmd_len
, void *data
)
5153 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5154 struct l2cap_chan
*chan
;
5157 if (cmd_len
!= sizeof(*rsp
))
5160 icid
= le16_to_cpu(rsp
->icid
);
5162 BT_DBG("icid 0x%4.4x", icid
);
5164 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5168 __clear_chan_timer(chan
);
5170 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5171 chan
->local_amp_id
= chan
->move_id
;
5173 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5174 __release_logical_link(chan
);
5176 l2cap_move_done(chan
);
5179 l2cap_chan_unlock(chan
);
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
5234 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5235 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5238 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5239 struct hci_conn
*hcon
= conn
->hcon
;
5240 u16 dcid
, mtu
, mps
, credits
, result
;
5241 struct l2cap_chan
*chan
;
5244 if (cmd_len
< sizeof(*rsp
))
5247 dcid
= __le16_to_cpu(rsp
->dcid
);
5248 mtu
= __le16_to_cpu(rsp
->mtu
);
5249 mps
= __le16_to_cpu(rsp
->mps
);
5250 credits
= __le16_to_cpu(rsp
->credits
);
5251 result
= __le16_to_cpu(rsp
->result
);
5253 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23 ||
5254 dcid
< L2CAP_CID_DYN_START
||
5255 dcid
> L2CAP_CID_LE_DYN_END
))
5258 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5259 dcid
, mtu
, mps
, credits
, result
);
5261 mutex_lock(&conn
->chan_lock
);
5263 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5271 l2cap_chan_lock(chan
);
5274 case L2CAP_CR_SUCCESS
:
5275 if (__l2cap_get_chan_by_dcid(conn
, dcid
)) {
5283 chan
->remote_mps
= mps
;
5284 chan
->tx_credits
= credits
;
5285 l2cap_chan_ready(chan
);
5288 case L2CAP_CR_AUTHENTICATION
:
5289 case L2CAP_CR_ENCRYPTION
:
5290 /* If we already have MITM protection we can't do
5293 if (hcon
->sec_level
> BT_SECURITY_MEDIUM
) {
5294 l2cap_chan_del(chan
, ECONNREFUSED
);
5298 sec_level
= hcon
->sec_level
+ 1;
5299 if (chan
->sec_level
< sec_level
)
5300 chan
->sec_level
= sec_level
;
5302 /* We'll need to send a new Connect Request */
5303 clear_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
);
5305 smp_conn_security(hcon
, chan
->sec_level
);
5309 l2cap_chan_del(chan
, ECONNREFUSED
);
5313 l2cap_chan_unlock(chan
);
5316 mutex_unlock(&conn
->chan_lock
);
5321 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5322 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5327 switch (cmd
->code
) {
5328 case L2CAP_COMMAND_REJ
:
5329 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5332 case L2CAP_CONN_REQ
:
5333 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5336 case L2CAP_CONN_RSP
:
5337 case L2CAP_CREATE_CHAN_RSP
:
5338 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5341 case L2CAP_CONF_REQ
:
5342 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5345 case L2CAP_CONF_RSP
:
5346 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5349 case L2CAP_DISCONN_REQ
:
5350 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5353 case L2CAP_DISCONN_RSP
:
5354 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5357 case L2CAP_ECHO_REQ
:
5358 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5361 case L2CAP_ECHO_RSP
:
5364 case L2CAP_INFO_REQ
:
5365 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5368 case L2CAP_INFO_RSP
:
5369 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5372 case L2CAP_CREATE_CHAN_REQ
:
5373 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5376 case L2CAP_MOVE_CHAN_REQ
:
5377 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5380 case L2CAP_MOVE_CHAN_RSP
:
5381 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5384 case L2CAP_MOVE_CHAN_CFM
:
5385 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5388 case L2CAP_MOVE_CHAN_CFM_RSP
:
5389 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5393 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5401 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5402 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5405 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5406 struct l2cap_le_conn_rsp rsp
;
5407 struct l2cap_chan
*chan
, *pchan
;
5408 u16 dcid
, scid
, credits
, mtu
, mps
;
5412 if (cmd_len
!= sizeof(*req
))
5415 scid
= __le16_to_cpu(req
->scid
);
5416 mtu
= __le16_to_cpu(req
->mtu
);
5417 mps
= __le16_to_cpu(req
->mps
);
5422 if (mtu
< 23 || mps
< 23)
5425 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5428 /* Check if we have socket listening on psm */
5429 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5430 &conn
->hcon
->dst
, LE_LINK
);
5432 result
= L2CAP_CR_BAD_PSM
;
5437 mutex_lock(&conn
->chan_lock
);
5438 l2cap_chan_lock(pchan
);
5440 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
,
5442 result
= L2CAP_CR_AUTHENTICATION
;
5444 goto response_unlock
;
5447 /* Check for valid dynamic CID range */
5448 if (scid
< L2CAP_CID_DYN_START
|| scid
> L2CAP_CID_LE_DYN_END
) {
5449 result
= L2CAP_CR_INVALID_SCID
;
5451 goto response_unlock
;
5454 /* Check if we already have channel with that dcid */
5455 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5456 result
= L2CAP_CR_SCID_IN_USE
;
5458 goto response_unlock
;
5461 chan
= pchan
->ops
->new_connection(pchan
);
5463 result
= L2CAP_CR_NO_MEM
;
5464 goto response_unlock
;
5467 l2cap_le_flowctl_init(chan
);
5469 bacpy(&chan
->src
, &conn
->hcon
->src
);
5470 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5471 chan
->src_type
= bdaddr_src_type(conn
->hcon
);
5472 chan
->dst_type
= bdaddr_dst_type(conn
->hcon
);
5476 chan
->remote_mps
= mps
;
5477 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5479 __l2cap_chan_add(conn
, chan
);
5481 credits
= chan
->rx_credits
;
5483 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5485 chan
->ident
= cmd
->ident
;
5487 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5488 l2cap_state_change(chan
, BT_CONNECT2
);
5489 /* The following result value is actually not defined
5490 * for LE CoC but we use it to let the function know
5491 * that it should bail out after doing its cleanup
5492 * instead of sending a response.
5494 result
= L2CAP_CR_PEND
;
5495 chan
->ops
->defer(chan
);
5497 l2cap_chan_ready(chan
);
5498 result
= L2CAP_CR_SUCCESS
;
5502 l2cap_chan_unlock(pchan
);
5503 mutex_unlock(&conn
->chan_lock
);
5504 l2cap_chan_put(pchan
);
5506 if (result
== L2CAP_CR_PEND
)
5511 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5512 rsp
.mps
= cpu_to_le16(chan
->mps
);
5518 rsp
.dcid
= cpu_to_le16(dcid
);
5519 rsp
.credits
= cpu_to_le16(credits
);
5520 rsp
.result
= cpu_to_le16(result
);
5522 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
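/* Worked example of the overflow guard above, with illustrative
 * numbers: if chan->tx_credits is 60000, max_credits is
 * 65535 - 60000 = 5535, so an L2CAP_LE_CREDITS packet granting 6000
 * more credits would wrap the counter and the channel is torn down
 * instead of accepting it.  Hypothetical helper:
 */
static bool __maybe_unused l2cap_sketch_credits_overflow(u16 tx_credits,
							 u16 new_credits)
{
	u16 max_credits = LE_FLOWCTL_MAX_CREDITS - tx_credits;

	return new_credits > max_credits;
}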
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);

	return 0;
}
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5647 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5648 struct sk_buff
*skb
)
5650 struct hci_conn
*hcon
= conn
->hcon
;
5651 struct l2cap_cmd_hdr
*cmd
;
5655 if (hcon
->type
!= LE_LINK
)
5658 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5661 cmd
= (void *) skb
->data
;
5662 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5664 len
= le16_to_cpu(cmd
->len
);
5666 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5668 if (len
!= skb
->len
|| !cmd
->ident
) {
5669 BT_DBG("corrupted command");
5673 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5675 struct l2cap_cmd_rej_unk rej
;
5677 BT_ERR("Wrong link type (%d)", err
);
5679 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5680 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5688 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5689 struct sk_buff
*skb
)
5691 struct hci_conn
*hcon
= conn
->hcon
;
5692 u8
*data
= skb
->data
;
5694 struct l2cap_cmd_hdr cmd
;
5697 l2cap_raw_recv(conn
, skb
);
5699 if (hcon
->type
!= ACL_LINK
)
5702 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5704 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5705 data
+= L2CAP_CMD_HDR_SIZE
;
5706 len
-= L2CAP_CMD_HDR_SIZE
;
5708 cmd_len
= le16_to_cpu(cmd
.len
);
5710 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5713 if (cmd_len
> len
|| !cmd
.ident
) {
5714 BT_DBG("corrupted command");
5718 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5720 struct l2cap_cmd_rej_unk rej
;
5722 BT_ERR("Wrong link type (%d)", err
);
5724 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5725 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
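/* Minimal sketch, not part of the original file: the FCS check above
 * runs crc16() over the ERTM/streaming header plus payload (hence the
 * "skb->data - hdr_size" arithmetic, since the header has already been
 * pulled) and compares it with the 16-bit FCS trailing the PDU.  The
 * same computation over a flat buffer that still contains the header:
 */
static bool __maybe_unused l2cap_sketch_fcs_ok(const u8 *pdu, size_t len)
{
	u16 rcv_fcs, our_fcs;

	if (len < L2CAP_FCS_SIZE)
		return false;

	rcv_fcs = get_unaligned_le16(pdu + len - L2CAP_FCS_SIZE);
	our_fcs = crc16(0, pdu, len - L2CAP_FCS_SIZE);

	return our_fcs == rcv_fcs;
}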
5758 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5760 struct l2cap_ctrl control
;
5762 BT_DBG("chan %p", chan
);
5764 memset(&control
, 0, sizeof(control
));
5767 control
.reqseq
= chan
->buffer_seq
;
5768 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5770 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5771 control
.super
= L2CAP_SUPER_RNR
;
5772 l2cap_send_sframe(chan
, &control
);
5775 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5776 chan
->unacked_frames
> 0)
5777 __set_retrans_timer(chan
);
5779 /* Send pending iframes */
5780 l2cap_ertm_send(chan
);
5782 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5783 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5784 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5787 control
.super
= L2CAP_SUPER_RR
;
5788 l2cap_send_sframe(chan
, &control
);
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5811 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5812 struct l2cap_ctrl
*control
)
5816 switch (control
->sar
) {
5817 case L2CAP_SAR_UNSEGMENTED
:
5821 err
= chan
->ops
->recv(chan
, skb
);
5824 case L2CAP_SAR_START
:
5828 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5829 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5831 if (chan
->sdu_len
> chan
->imtu
) {
5836 if (skb
->len
>= chan
->sdu_len
)
5840 chan
->sdu_last_frag
= skb
;
5846 case L2CAP_SAR_CONTINUE
:
5850 append_skb_frag(chan
->sdu
, skb
,
5851 &chan
->sdu_last_frag
);
5854 if (chan
->sdu
->len
>= chan
->sdu_len
)
5864 append_skb_frag(chan
->sdu
, skb
,
5865 &chan
->sdu_last_frag
);
5868 if (chan
->sdu
->len
!= chan
->sdu_len
)
5871 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5874 /* Reassembly complete */
5876 chan
->sdu_last_frag
= NULL
;
5884 kfree_skb(chan
->sdu
);
5886 chan
->sdu_last_frag
= NULL
;
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}

void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;

	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5944 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5945 struct l2cap_ctrl
*control
)
5947 struct sk_buff
*skb
;
5949 BT_DBG("chan %p, control %p", chan
, control
);
5951 if (control
->reqseq
== chan
->next_tx_seq
) {
5952 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5953 l2cap_send_disconn_req(chan
, ECONNRESET
);
5957 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5960 BT_DBG("Seq %d not available for retransmission",
5965 if (chan
->max_tx
!= 0 && bt_cb(skb
)->l2cap
.retries
>= chan
->max_tx
) {
5966 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5967 l2cap_send_disconn_req(chan
, ECONNRESET
);
5971 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5973 if (control
->poll
) {
5974 l2cap_pass_to_tx(chan
, control
);
5976 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5977 l2cap_retransmit(chan
, control
);
5978 l2cap_ertm_send(chan
);
5980 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5981 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5982 chan
->srej_save_reqseq
= control
->reqseq
;
5985 l2cap_pass_to_tx_fbit(chan
, control
);
5987 if (control
->final
) {
5988 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5989 !test_and_clear_bit(CONN_SREJ_ACT
,
5991 l2cap_retransmit(chan
, control
);
5993 l2cap_retransmit(chan
, control
);
5994 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5995 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5996 chan
->srej_save_reqseq
= control
->reqseq
;
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6039 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
6041 BT_DBG("chan %p, txseq %d", chan
, txseq
);
6043 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
6044 chan
->expected_tx_seq
);
6046 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
6047 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6049 /* See notes below regarding "double poll" and
6052 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6053 BT_DBG("Invalid/Ignore - after SREJ");
6054 return L2CAP_TXSEQ_INVALID_IGNORE
;
6056 BT_DBG("Invalid - in window after SREJ sent");
6057 return L2CAP_TXSEQ_INVALID
;
6061 if (chan
->srej_list
.head
== txseq
) {
6062 BT_DBG("Expected SREJ");
6063 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6066 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6067 BT_DBG("Duplicate SREJ - txseq already stored");
6068 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6071 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6072 BT_DBG("Unexpected SREJ - not requested");
6073 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6077 if (chan
->expected_tx_seq
== txseq
) {
6078 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6080 BT_DBG("Invalid - txseq outside tx window");
6081 return L2CAP_TXSEQ_INVALID
;
6084 return L2CAP_TXSEQ_EXPECTED
;
6088 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6089 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6090 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6091 return L2CAP_TXSEQ_DUPLICATE
;
6094 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6095 /* A source of invalid packets is a "double poll" condition,
6096 * where delays cause us to send multiple poll packets. If
6097 * the remote stack receives and processes both polls,
6098 * sequence numbers can wrap around in such a way that a
6099 * resent frame has a sequence number that looks like new data
6100 * with a sequence gap. This would trigger an erroneous SREJ
6103 * Fortunately, this is impossible with a tx window that's
6104 * less than half of the maximum sequence number, which allows
6105 * invalid frames to be safely ignored.
6107 * With tx window sizes greater than half of the tx window
6108 * maximum, the frame is invalid and cannot be ignored. This
6109 * causes a disconnect.
6112 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6113 BT_DBG("Invalid/Ignore - txseq outside tx window");
6114 return L2CAP_TXSEQ_INVALID_IGNORE
;
6116 BT_DBG("Invalid - txseq outside tx window");
6117 return L2CAP_TXSEQ_INVALID
;
6120 BT_DBG("Unexpected - txseq indicates missing frames");
6121 return L2CAP_TXSEQ_UNEXPECTED
;
6125 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6126 struct l2cap_ctrl
*control
,
6127 struct sk_buff
*skb
, u8 event
)
6130 bool skb_in_use
= false;
6132 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6136 case L2CAP_EV_RECV_IFRAME
:
6137 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6138 case L2CAP_TXSEQ_EXPECTED
:
6139 l2cap_pass_to_tx(chan
, control
);
6141 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6142 BT_DBG("Busy, discarding expected seq %d",
6147 chan
->expected_tx_seq
= __next_seq(chan
,
6150 chan
->buffer_seq
= chan
->expected_tx_seq
;
6153 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6157 if (control
->final
) {
6158 if (!test_and_clear_bit(CONN_REJ_ACT
,
6159 &chan
->conn_state
)) {
6161 l2cap_retransmit_all(chan
, control
);
6162 l2cap_ertm_send(chan
);
6166 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6167 l2cap_send_ack(chan
);
6169 case L2CAP_TXSEQ_UNEXPECTED
:
6170 l2cap_pass_to_tx(chan
, control
);
6172 /* Can't issue SREJ frames in the local busy state.
6173 * Drop this frame, it will be seen as missing
6174 * when local busy is exited.
6176 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6177 BT_DBG("Busy, discarding unexpected seq %d",
6182 /* There was a gap in the sequence, so an SREJ
6183 * must be sent for each missing frame. The
6184 * current frame is stored for later use.
6186 skb_queue_tail(&chan
->srej_q
, skb
);
6188 BT_DBG("Queued %p (queue len %d)", skb
,
6189 skb_queue_len(&chan
->srej_q
));
6191 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6192 l2cap_seq_list_clear(&chan
->srej_list
);
6193 l2cap_send_srej(chan
, control
->txseq
);
6195 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6197 case L2CAP_TXSEQ_DUPLICATE
:
6198 l2cap_pass_to_tx(chan
, control
);
6200 case L2CAP_TXSEQ_INVALID_IGNORE
:
6202 case L2CAP_TXSEQ_INVALID
:
6204 l2cap_send_disconn_req(chan
, ECONNRESET
);
6208 case L2CAP_EV_RECV_RR
:
6209 l2cap_pass_to_tx(chan
, control
);
6210 if (control
->final
) {
6211 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6213 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6214 !__chan_is_moving(chan
)) {
6216 l2cap_retransmit_all(chan
, control
);
6219 l2cap_ertm_send(chan
);
6220 } else if (control
->poll
) {
6221 l2cap_send_i_or_rr_or_rnr(chan
);
6223 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6224 &chan
->conn_state
) &&
6225 chan
->unacked_frames
)
6226 __set_retrans_timer(chan
);
6228 l2cap_ertm_send(chan
);
6231 case L2CAP_EV_RECV_RNR
:
6232 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6233 l2cap_pass_to_tx(chan
, control
);
6234 if (control
&& control
->poll
) {
6235 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6236 l2cap_send_rr_or_rnr(chan
, 0);
6238 __clear_retrans_timer(chan
);
6239 l2cap_seq_list_clear(&chan
->retrans_list
);
6241 case L2CAP_EV_RECV_REJ
:
6242 l2cap_handle_rej(chan
, control
);
6244 case L2CAP_EV_RECV_SREJ
:
6245 l2cap_handle_srej(chan
, control
);
6251 if (skb
&& !skb_in_use
) {
6252 BT_DBG("Freeing %p", skb
);
6259 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6260 struct l2cap_ctrl
*control
,
6261 struct sk_buff
*skb
, u8 event
)
6264 u16 txseq
= control
->txseq
;
6265 bool skb_in_use
= false;
6267 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6271 case L2CAP_EV_RECV_IFRAME
:
6272 switch (l2cap_classify_txseq(chan
, txseq
)) {
6273 case L2CAP_TXSEQ_EXPECTED
:
6274 /* Keep frame for reassembly later */
6275 l2cap_pass_to_tx(chan
, control
);
6276 skb_queue_tail(&chan
->srej_q
, skb
);
6278 BT_DBG("Queued %p (queue len %d)", skb
,
6279 skb_queue_len(&chan
->srej_q
));
6281 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6283 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6284 l2cap_seq_list_pop(&chan
->srej_list
);
6286 l2cap_pass_to_tx(chan
, control
);
6287 skb_queue_tail(&chan
->srej_q
, skb
);
6289 BT_DBG("Queued %p (queue len %d)", skb
,
6290 skb_queue_len(&chan
->srej_q
));
6292 err
= l2cap_rx_queued_iframes(chan
);
6297 case L2CAP_TXSEQ_UNEXPECTED
:
6298 /* Got a frame that can't be reassembled yet.
6299 * Save it for later, and send SREJs to cover
6300 * the missing frames.
6302 skb_queue_tail(&chan
->srej_q
, skb
);
6304 BT_DBG("Queued %p (queue len %d)", skb
,
6305 skb_queue_len(&chan
->srej_q
));
6307 l2cap_pass_to_tx(chan
, control
);
6308 l2cap_send_srej(chan
, control
->txseq
);
6310 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6311 /* This frame was requested with an SREJ, but
6312 * some expected retransmitted frames are
6313 * missing. Request retransmission of missing
6316 skb_queue_tail(&chan
->srej_q
, skb
);
6318 BT_DBG("Queued %p (queue len %d)", skb
,
6319 skb_queue_len(&chan
->srej_q
));
6321 l2cap_pass_to_tx(chan
, control
);
6322 l2cap_send_srej_list(chan
, control
->txseq
);
6324 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6325 /* We've already queued this frame. Drop this copy. */
6326 l2cap_pass_to_tx(chan
, control
);
6328 case L2CAP_TXSEQ_DUPLICATE
:
6329 /* Expecting a later sequence number, so this frame
6330 * was already received. Ignore it completely.
6333 case L2CAP_TXSEQ_INVALID_IGNORE
:
6335 case L2CAP_TXSEQ_INVALID
:
6337 l2cap_send_disconn_req(chan
, ECONNRESET
);
6341 case L2CAP_EV_RECV_RR
:
6342 l2cap_pass_to_tx(chan
, control
);
6343 if (control
->final
) {
6344 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6346 if (!test_and_clear_bit(CONN_REJ_ACT
,
6347 &chan
->conn_state
)) {
6349 l2cap_retransmit_all(chan
, control
);
6352 l2cap_ertm_send(chan
);
6353 } else if (control
->poll
) {
6354 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6355 &chan
->conn_state
) &&
6356 chan
->unacked_frames
) {
6357 __set_retrans_timer(chan
);
6360 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6361 l2cap_send_srej_tail(chan
);
6363 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6364 &chan
->conn_state
) &&
6365 chan
->unacked_frames
)
6366 __set_retrans_timer(chan
);
6368 l2cap_send_ack(chan
);
6371 case L2CAP_EV_RECV_RNR
:
6372 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6373 l2cap_pass_to_tx(chan
, control
);
6374 if (control
->poll
) {
6375 l2cap_send_srej_tail(chan
);
6377 struct l2cap_ctrl rr_control
;
6378 memset(&rr_control
, 0, sizeof(rr_control
));
6379 rr_control
.sframe
= 1;
6380 rr_control
.super
= L2CAP_SUPER_RR
;
6381 rr_control
.reqseq
= chan
->buffer_seq
;
6382 l2cap_send_sframe(chan
, &rr_control
);
6386 case L2CAP_EV_RECV_REJ
:
6387 l2cap_handle_rej(chan
, control
);
6389 case L2CAP_EV_RECV_SREJ
:
6390 l2cap_handle_srej(chan
, control
);
6394 if (skb
&& !skb_in_use
) {
6395 BT_DBG("Freeing %p", skb
);
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
6416 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6417 struct l2cap_ctrl
*control
,
6418 struct sk_buff
*skb
, u8 event
)
6422 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6428 l2cap_process_reqseq(chan
, control
->reqseq
);
6430 if (!skb_queue_empty(&chan
->tx_q
))
6431 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6433 chan
->tx_send_head
= NULL
;
6435 /* Rewind next_tx_seq to the point expected
6438 chan
->next_tx_seq
= control
->reqseq
;
6439 chan
->unacked_frames
= 0;
6441 err
= l2cap_finish_move(chan
);
6445 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6446 l2cap_send_i_or_rr_or_rnr(chan
);
6448 if (event
== L2CAP_EV_RECV_IFRAME
)
6451 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6454 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6455 struct l2cap_ctrl
*control
,
6456 struct sk_buff
*skb
, u8 event
)
6460 if (!control
->final
)
6463 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6465 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6466 l2cap_process_reqseq(chan
, control
->reqseq
);
6468 if (!skb_queue_empty(&chan
->tx_q
))
6469 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6471 chan
->tx_send_head
= NULL
;
6473 /* Rewind next_tx_seq to the point expected
6476 chan
->next_tx_seq
= control
->reqseq
;
6477 chan
->unacked_frames
= 0;
6480 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6482 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6484 err
= l2cap_resegment(chan
);
6487 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}
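/* Worked example of the modular sequence math above (illustrative
 * numbers, 6-bit sequence space so tx_win_max = 63): with
 * next_tx_seq = 2 and expected_ack_seq = 60 there are
 * __seq_offset(chan, 2, 60) = (2 - 60) mod 64 = 6 unacked frames
 * (60..63 and 0..1), so only reqseq values whose offset from
 * next_tx_seq is at most 6 - that is 60..63, 0, 1 or 2 - are accepted.
 */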
6501 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6502 struct sk_buff
*skb
, u8 event
)
6506 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6507 control
, skb
, event
, chan
->rx_state
);
6509 if (__valid_reqseq(chan
, control
->reqseq
)) {
6510 switch (chan
->rx_state
) {
6511 case L2CAP_RX_STATE_RECV
:
6512 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6514 case L2CAP_RX_STATE_SREJ_SENT
:
6515 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6518 case L2CAP_RX_STATE_WAIT_P
:
6519 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6521 case L2CAP_RX_STATE_WAIT_F
:
6522 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6529 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6530 control
->reqseq
, chan
->next_tx_seq
,
6531 chan
->expected_ack_seq
);
6532 l2cap_send_disconn_req(chan
, ECONNRESET
);
6538 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6539 struct sk_buff
*skb
)
6543 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6546 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6547 L2CAP_TXSEQ_EXPECTED
) {
6548 l2cap_pass_to_tx(chan
, control
);
6550 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6551 __next_seq(chan
, chan
->buffer_seq
));
6553 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6555 l2cap_reassemble_sdu(chan
, skb
, control
);
6558 kfree_skb(chan
->sdu
);
6561 chan
->sdu_last_frag
= NULL
;
6565 BT_DBG("Freeing %p", skb
);
6570 chan
->last_acked_seq
= control
->txseq
;
6571 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6576 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6578 struct l2cap_ctrl
*control
= &bt_cb(skb
)->l2cap
;
6582 __unpack_control(chan
, skb
);
6587 * We can just drop the corrupted I-frame here.
6588 * Receiver will miss it and start proper recovery
6589 * procedures and ask for retransmission.
6591 if (l2cap_check_fcs(chan
, skb
))
6594 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6595 len
-= L2CAP_SDULEN_SIZE
;
6597 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6598 len
-= L2CAP_FCS_SIZE
;
6600 if (len
> chan
->mps
) {
6601 l2cap_send_disconn_req(chan
, ECONNRESET
);
6605 if (!control
->sframe
) {
6608 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6609 control
->sar
, control
->reqseq
, control
->final
,
6612 /* Validate F-bit - F=0 always valid, F=1 only
6613 * valid in TX WAIT_F
6615 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6618 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6619 event
= L2CAP_EV_RECV_IFRAME
;
6620 err
= l2cap_rx(chan
, control
, skb
, event
);
6622 err
= l2cap_stream_rx(chan
, control
, skb
);
6626 l2cap_send_disconn_req(chan
, ECONNRESET
);
6628 const u8 rx_func_to_event
[4] = {
6629 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6630 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6633 /* Only I-frames are expected in streaming mode */
6634 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6637 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6638 control
->reqseq
, control
->final
, control
->poll
,
6642 BT_ERR("Trailing bytes: %d in sframe", len
);
6643 l2cap_send_disconn_req(chan
, ECONNRESET
);
6647 /* Validate F and P bits */
6648 if (control
->final
&& (control
->poll
||
6649 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6652 event
= rx_func_to_event
[control
->super
];
6653 if (l2cap_rx(chan
, control
, skb
, event
))
6654 l2cap_send_disconn_req(chan
, ECONNRESET
);
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
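/* Illustrative sketch of the replenish rule above: nothing is returned
 * while rx_credits is still at or above half of le_max_credits; once
 * it drops below that, the peer is granted exactly enough credits to
 * get back to le_max_credits.  Hypothetical helper:
 */
static u16 __maybe_unused l2cap_sketch_credits_to_return(u16 rx_credits)
{
	if (rx_credits >= (le_max_credits + 1) / 2)
		return 0;

	return le_max_credits - rx_credits;
}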
6690 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6694 if (!chan
->rx_credits
) {
6695 BT_ERR("No credits to receive LE L2CAP data");
6696 l2cap_send_disconn_req(chan
, ECONNRESET
);
6700 if (chan
->imtu
< skb
->len
) {
6701 BT_ERR("Too big LE L2CAP PDU");
6706 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6708 l2cap_chan_le_send_credits(chan
);
6715 sdu_len
= get_unaligned_le16(skb
->data
);
6716 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6718 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6719 sdu_len
, skb
->len
, chan
->imtu
);
6721 if (sdu_len
> chan
->imtu
) {
6722 BT_ERR("Too big LE L2CAP SDU length received");
6727 if (skb
->len
> sdu_len
) {
6728 BT_ERR("Too much LE L2CAP data received");
6733 if (skb
->len
== sdu_len
)
6734 return chan
->ops
->recv(chan
, skb
);
6737 chan
->sdu_len
= sdu_len
;
6738 chan
->sdu_last_frag
= skb
;
6743 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6744 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6746 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6747 BT_ERR("Too much LE L2CAP data received");
6752 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6755 if (chan
->sdu
->len
== chan
->sdu_len
) {
6756 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6759 chan
->sdu_last_frag
= NULL
;
6767 kfree_skb(chan
->sdu
);
6769 chan
->sdu_last_frag
= NULL
;
6773 /* We can't return an error here since we took care of the skb
6774 * freeing internally. An error return would cause the caller to
6775 * do a double-free of the skb.
6780 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6781 struct sk_buff
*skb
)
6783 struct l2cap_chan
*chan
;
6785 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6787 if (cid
== L2CAP_CID_A2MP
) {
6788 chan
= a2mp_channel_create(conn
, skb
);
6794 l2cap_chan_lock(chan
);
6796 BT_DBG("unknown cid 0x%4.4x", cid
);
6797 /* Drop packet and return */
6803 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6805 /* If we receive data on a fixed channel before the info req/rsp
6806 * procdure is done simply assume that the channel is supported
6807 * and mark it as ready.
6809 if (chan
->chan_type
== L2CAP_CHAN_FIXED
)
6810 l2cap_chan_ready(chan
);
6812 if (chan
->state
!= BT_CONNECTED
)
6815 switch (chan
->mode
) {
6816 case L2CAP_MODE_LE_FLOWCTL
:
6817 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6822 case L2CAP_MODE_BASIC
:
6823 /* If socket recv buffers overflows we drop data here
6824 * which is *bad* because L2CAP has to be reliable.
6825 * But we don't have any other choice. L2CAP doesn't
6826 * provide flow control mechanism. */
6828 if (chan
->imtu
< skb
->len
) {
6829 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6833 if (!chan
->ops
->recv(chan
, skb
))
6837 case L2CAP_MODE_ERTM
:
6838 case L2CAP_MODE_STREAMING
:
6839 l2cap_data_rcv(chan
, skb
);
6843 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6851 l2cap_chan_unlock(chan
);
6854 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6855 struct sk_buff
*skb
)
6857 struct hci_conn
*hcon
= conn
->hcon
;
6858 struct l2cap_chan
*chan
;
6860 if (hcon
->type
!= ACL_LINK
)
6863 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6868 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6870 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6873 if (chan
->imtu
< skb
->len
)
6876 /* Store remote BD_ADDR and PSM for msg_name */
6877 bacpy(&bt_cb(skb
)->l2cap
.bdaddr
, &hcon
->dst
);
6878 bt_cb(skb
)->l2cap
.psm
= psm
;
6880 if (!chan
->ops
->recv(chan
, skb
)) {
6881 l2cap_chan_put(chan
);
6886 l2cap_chan_put(chan
);
6891 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6893 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6894 struct hci_conn
*hcon
= conn
->hcon
;
6898 if (hcon
->state
!= BT_CONNECTED
) {
6899 BT_DBG("queueing pending rx skb");
6900 skb_queue_tail(&conn
->pending_rx
, skb
);
6904 skb_pull(skb
, L2CAP_HDR_SIZE
);
6905 cid
= __le16_to_cpu(lh
->cid
);
6906 len
= __le16_to_cpu(lh
->len
);
6908 if (len
!= skb
->len
) {
6913 /* Since we can't actively block incoming LE connections we must
6914 * at least ensure that we ignore incoming data from them.
6916 if (hcon
->type
== LE_LINK
&&
6917 hci_bdaddr_list_lookup(&hcon
->hdev
->blacklist
, &hcon
->dst
,
6918 bdaddr_dst_type(hcon
))) {
6923 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6926 case L2CAP_CID_SIGNALING
:
6927 l2cap_sig_channel(conn
, skb
);
6930 case L2CAP_CID_CONN_LESS
:
6931 psm
= get_unaligned((__le16
*) skb
->data
);
6932 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6933 l2cap_conless_channel(conn
, psm
, skb
);
6936 case L2CAP_CID_LE_SIGNALING
:
6937 l2cap_le_sig_channel(conn
, skb
);
6941 l2cap_data_channel(conn
, cid
, skb
);
static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);
}
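/* Minimal sketch, not part of the original file: the producer side of
 * this queue lives in l2cap_recv_frame(), which parks frames that
 * arrive before the HCI link reaches BT_CONNECTED.  Scheduling the
 * work item right away, as below, is only an illustration; the real
 * flow kicks pending_rx_work once the connection becomes ready.
 */
static void __maybe_unused l2cap_sketch_park_early_frame(struct l2cap_conn *conn,
							 struct sk_buff *skb)
{
	skb_queue_tail(&conn->pending_rx, skb);
	schedule_work(&conn->pending_rx_work);
}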
6958 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
6960 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6961 struct hci_chan
*hchan
;
6966 hchan
= hci_chan_create(hcon
);
6970 conn
= kzalloc(sizeof(*conn
), GFP_KERNEL
);
6972 hci_chan_del(hchan
);
6976 kref_init(&conn
->ref
);
6977 hcon
->l2cap_data
= conn
;
6978 conn
->hcon
= hci_conn_get(hcon
);
6979 conn
->hchan
= hchan
;
6981 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
6983 switch (hcon
->type
) {
6985 if (hcon
->hdev
->le_mtu
) {
6986 conn
->mtu
= hcon
->hdev
->le_mtu
;
6991 conn
->mtu
= hcon
->hdev
->acl_mtu
;
6995 conn
->feat_mask
= 0;
6997 conn
->local_fixed_chan
= L2CAP_FC_SIG_BREDR
| L2CAP_FC_CONNLESS
;
6999 if (hcon
->type
== ACL_LINK
&&
7000 hci_dev_test_flag(hcon
->hdev
, HCI_HS_ENABLED
))
7001 conn
->local_fixed_chan
|= L2CAP_FC_A2MP
;
7003 if (hci_dev_test_flag(hcon
->hdev
, HCI_LE_ENABLED
) &&
7004 (bredr_sc_enabled(hcon
->hdev
) ||
7005 hci_dev_test_flag(hcon
->hdev
, HCI_FORCE_BREDR_SMP
)))
7006 conn
->local_fixed_chan
|= L2CAP_FC_SMP_BREDR
;
7008 mutex_init(&conn
->ident_lock
);
7009 mutex_init(&conn
->chan_lock
);
7011 INIT_LIST_HEAD(&conn
->chan_l
);
7012 INIT_LIST_HEAD(&conn
->users
);
7014 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
7016 skb_queue_head_init(&conn
->pending_rx
);
7017 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
7018 INIT_WORK(&conn
->id_addr_update_work
, l2cap_conn_update_id_addr
);
7020 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
static bool is_valid_psm(u16 psm, u8 dst_type) {
	if (!psm)
		return false;

	if (bdaddr_type_is_le(dst_type))
		return (psm <= 0x00ff);

	/* PSM must be odd and lsb of upper byte must be 0 */
	return ((psm & 0x0101) == 0x0001);
}
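/* Examples of the PSM rule above (illustrative values): on BR/EDR,
 * 0x0001 and 0x1001 are valid (odd, with bit 8 clear), while 0x0002
 * (even) and 0x0101 (bit 8 set) are rejected; for LE, any PSM up to
 * 0x00ff is accepted.
 */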
7036 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
7037 bdaddr_t
*dst
, u8 dst_type
)
7039 struct l2cap_conn
*conn
;
7040 struct hci_conn
*hcon
;
7041 struct hci_dev
*hdev
;
7044 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
7045 dst_type
, __le16_to_cpu(psm
));
7047 hdev
= hci_get_route(dst
, &chan
->src
);
7049 return -EHOSTUNREACH
;
7053 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
7054 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
7059 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
7064 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
7069 switch (chan
->mode
) {
7070 case L2CAP_MODE_BASIC
:
7072 case L2CAP_MODE_LE_FLOWCTL
:
7073 l2cap_le_flowctl_init(chan
);
7075 case L2CAP_MODE_ERTM
:
7076 case L2CAP_MODE_STREAMING
:
7085 switch (chan
->state
) {
7089 /* Already connecting */
7094 /* Already connected */
7108 /* Set destination address and psm */
7109 bacpy(&chan
->dst
, dst
);
7110 chan
->dst_type
= dst_type
;
7115 if (bdaddr_type_is_le(dst_type
)) {
7118 /* Convert from L2CAP channel address type to HCI address type
7120 if (dst_type
== BDADDR_LE_PUBLIC
)
7121 dst_type
= ADDR_LE_DEV_PUBLIC
;
7123 dst_type
= ADDR_LE_DEV_RANDOM
;
7125 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
7126 role
= HCI_ROLE_SLAVE
;
7128 role
= HCI_ROLE_MASTER
;
7130 hcon
= hci_connect_le_scan(hdev
, dst
, dst_type
,
7132 HCI_LE_CONN_TIMEOUT
,
7135 u8 auth_type
= l2cap_get_auth_type(chan
);
7136 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7140 err
= PTR_ERR(hcon
);
7144 conn
= l2cap_conn_add(hcon
);
7146 hci_conn_drop(hcon
);
7151 mutex_lock(&conn
->chan_lock
);
7152 l2cap_chan_lock(chan
);
7154 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7155 hci_conn_drop(hcon
);
7160 /* Update source addr of the socket */
7161 bacpy(&chan
->src
, &hcon
->src
);
7162 chan
->src_type
= bdaddr_src_type(hcon
);
7164 __l2cap_chan_add(conn
, chan
);
7166 /* l2cap_chan_add takes its own ref so we can drop this one */
7167 hci_conn_drop(hcon
);
7169 l2cap_state_change(chan
, BT_CONNECT
);
7170 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7172 /* Release chan->sport so that it can be reused by other
7173 * sockets (as it's only used for listening sockets).
7175 write_lock(&chan_list_lock
);
7177 write_unlock(&chan_list_lock
);
7179 if (hcon
->state
== BT_CONNECTED
) {
7180 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7181 __clear_chan_timer(chan
);
7182 if (l2cap_chan_check_security(chan
, true))
7183 l2cap_state_change(chan
, BT_CONNECTED
);
7185 l2cap_do_start(chan
);
7191 l2cap_chan_unlock(chan
);
7192 mutex_unlock(&conn
->chan_lock
);
7194 hci_dev_unlock(hdev
);
7198 EXPORT_SYMBOL_GPL(l2cap_chan_connect
);
/* ---- L2CAP interface with lower layer (HCI) ---- */

int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7267 static void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7269 struct hci_dev
*hdev
= hcon
->hdev
;
7270 struct l2cap_conn
*conn
;
7271 struct l2cap_chan
*pchan
;
7274 if (hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
)
7277 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7280 l2cap_conn_del(hcon
, bt_to_errno(status
));
7284 conn
= l2cap_conn_add(hcon
);
7288 dst_type
= bdaddr_dst_type(hcon
);
7290 /* If device is blocked, do not create channels for it */
7291 if (hci_bdaddr_list_lookup(&hdev
->blacklist
, &hcon
->dst
, dst_type
))
7294 /* Find fixed channels and notify them of the new connection. We
7295 * use multiple individual lookups, continuing each time where
7296 * we left off, because the list lock would prevent calling the
7297 * potentially sleeping l2cap_chan_lock() function.
7299 pchan
= l2cap_global_fixed_chan(NULL
, hcon
);
7301 struct l2cap_chan
*chan
, *next
;
7303 /* Client fixed channels should override server ones */
7304 if (__l2cap_get_chan_by_dcid(conn
, pchan
->scid
))
7307 l2cap_chan_lock(pchan
);
7308 chan
= pchan
->ops
->new_connection(pchan
);
7310 bacpy(&chan
->src
, &hcon
->src
);
7311 bacpy(&chan
->dst
, &hcon
->dst
);
7312 chan
->src_type
= bdaddr_src_type(hcon
);
7313 chan
->dst_type
= dst_type
;
7315 __l2cap_chan_add(conn
, chan
);
7318 l2cap_chan_unlock(pchan
);
7320 next
= l2cap_global_fixed_chan(pchan
, hcon
);
7321 l2cap_chan_put(pchan
);
7325 l2cap_conn_ready(conn
);
int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}

static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level == BT_SECURITY_FIPS)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];

				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
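/* Entry point for incoming ACL data. A complete L2CAP frame may arrive
 * in a single HCI packet or be fragmented across several; fragments are
 * reassembled into conn->rx_skb until conn->rx_len reaches zero, at
 * which point the full frame is handed to l2cap_recv_frame().
 */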
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* A start fragment always begins with the basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;

			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
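/* Callbacks registered with the HCI core (see l2cap_init() below). */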
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
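/* Dump all known channels to debugfs. Each channel becomes one line of
 * the form "src (type) dst (type) state psm scid dcid imtu omtu
 * sec_level mode", for example (illustrative values only):
 *
 *   00:11:22:33:44:55 (0) 66:77:88:99:AA:BB (0) 1 4097 0x0040 0x0041 672 672 1 0
 */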
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;
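/* Module init: register the L2CAP socket layer, hook into the HCI core
 * and, when debugfs is available, expose the channel list and the LE
 * flow control tunables under the bluetooth debugfs directory.
 */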
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	return 0;
}
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
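/* disable_ertm can be toggled at module load time or at runtime; when
 * L2CAP is built into the bluetooth module the parameter typically
 * appears as /sys/module/bluetooth/parameters/disable_ertm (the exact
 * path depends on the build configuration).
 */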
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");