2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
51 static LIST_HEAD(chan_list
);
52 static DEFINE_RWLOCK(chan_list_lock
);
54 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
55 u8 code
, u8 ident
, u16 dlen
, void *data
);
56 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
58 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
, size_t data_size
);
59 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
61 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
62 struct sk_buff_head
*skbs
, u8 event
);
64 static inline u8
bdaddr_type(u8 link_type
, u8 bdaddr_type
)
66 if (link_type
== LE_LINK
) {
67 if (bdaddr_type
== ADDR_LE_DEV_PUBLIC
)
68 return BDADDR_LE_PUBLIC
;
70 return BDADDR_LE_RANDOM
;
76 static inline u8
bdaddr_src_type(struct hci_conn
*hcon
)
78 return bdaddr_type(hcon
->type
, hcon
->src_type
);
81 static inline u8
bdaddr_dst_type(struct hci_conn
*hcon
)
83 return bdaddr_type(hcon
->type
, hcon
->dst_type
);
86 /* ---- L2CAP channels ---- */
88 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
93 list_for_each_entry(c
, &conn
->chan_l
, list
) {
100 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
103 struct l2cap_chan
*c
;
105 list_for_each_entry(c
, &conn
->chan_l
, list
) {
112 /* Find channel with given SCID.
113 * Returns locked channel. */
114 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
117 struct l2cap_chan
*c
;
119 mutex_lock(&conn
->chan_lock
);
120 c
= __l2cap_get_chan_by_scid(conn
, cid
);
123 mutex_unlock(&conn
->chan_lock
);
128 /* Find channel with given DCID.
129 * Returns locked channel.
131 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
134 struct l2cap_chan
*c
;
136 mutex_lock(&conn
->chan_lock
);
137 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
140 mutex_unlock(&conn
->chan_lock
);
145 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
148 struct l2cap_chan
*c
;
150 list_for_each_entry(c
, &conn
->chan_l
, list
) {
151 if (c
->ident
== ident
)
157 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
160 struct l2cap_chan
*c
;
162 mutex_lock(&conn
->chan_lock
);
163 c
= __l2cap_get_chan_by_ident(conn
, ident
);
166 mutex_unlock(&conn
->chan_lock
);
171 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
,
174 struct l2cap_chan
*c
;
176 list_for_each_entry(c
, &chan_list
, global_l
) {
177 if (src_type
== BDADDR_BREDR
&& c
->src_type
!= BDADDR_BREDR
)
180 if (src_type
!= BDADDR_BREDR
&& c
->src_type
== BDADDR_BREDR
)
183 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
189 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
193 write_lock(&chan_list_lock
);
195 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
, chan
->src_type
)) {
205 u16 p
, start
, end
, incr
;
207 if (chan
->src_type
== BDADDR_BREDR
) {
208 start
= L2CAP_PSM_DYN_START
;
209 end
= L2CAP_PSM_AUTO_END
;
212 start
= L2CAP_PSM_LE_DYN_START
;
213 end
= L2CAP_PSM_LE_DYN_END
;
218 for (p
= start
; p
<= end
; p
+= incr
)
219 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
,
221 chan
->psm
= cpu_to_le16(p
);
222 chan
->sport
= cpu_to_le16(p
);
229 write_unlock(&chan_list_lock
);
232 EXPORT_SYMBOL_GPL(l2cap_add_psm
);
234 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
236 write_lock(&chan_list_lock
);
238 /* Override the defaults (which are for conn-oriented) */
239 chan
->omtu
= L2CAP_DEFAULT_MTU
;
240 chan
->chan_type
= L2CAP_CHAN_FIXED
;
244 write_unlock(&chan_list_lock
);
249 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
253 if (conn
->hcon
->type
== LE_LINK
)
254 dyn_end
= L2CAP_CID_LE_DYN_END
;
256 dyn_end
= L2CAP_CID_DYN_END
;
258 for (cid
= L2CAP_CID_DYN_START
; cid
<= dyn_end
; cid
++) {
259 if (!__l2cap_get_chan_by_scid(conn
, cid
))
266 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
268 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
269 state_to_string(state
));
272 chan
->ops
->state_change(chan
, state
, 0);
275 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
279 chan
->ops
->state_change(chan
, chan
->state
, err
);
282 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
284 chan
->ops
->state_change(chan
, chan
->state
, err
);
287 static void __set_retrans_timer(struct l2cap_chan
*chan
)
289 if (!delayed_work_pending(&chan
->monitor_timer
) &&
290 chan
->retrans_timeout
) {
291 l2cap_set_timer(chan
, &chan
->retrans_timer
,
292 msecs_to_jiffies(chan
->retrans_timeout
));
296 static void __set_monitor_timer(struct l2cap_chan
*chan
)
298 __clear_retrans_timer(chan
);
299 if (chan
->monitor_timeout
) {
300 l2cap_set_timer(chan
, &chan
->monitor_timer
,
301 msecs_to_jiffies(chan
->monitor_timeout
));
305 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
310 skb_queue_walk(head
, skb
) {
311 if (bt_cb(skb
)->l2cap
.txseq
== seq
)
318 /* ---- L2CAP sequence number lists ---- */
320 /* For ERTM, ordered lists of sequence numbers must be tracked for
321 * SREJ requests that are received and for frames that are to be
322 * retransmitted. These seq_list functions implement a singly-linked
323 * list in an array, where membership in the list can also be checked
324 * in constant time. Items can also be added to the tail of the list
325 * and removed from the head in constant time, without further memory
329 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
331 size_t alloc_size
, i
;
333 /* Allocated size is a power of 2 to map sequence numbers
334 * (which may be up to 14 bits) in to a smaller array that is
335 * sized for the negotiated ERTM transmit windows.
337 alloc_size
= roundup_pow_of_two(size
);
339 seq_list
->list
= kmalloc_array(alloc_size
, sizeof(u16
), GFP_KERNEL
);
343 seq_list
->mask
= alloc_size
- 1;
344 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
345 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
346 for (i
= 0; i
< alloc_size
; i
++)
347 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
352 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
354 kfree(seq_list
->list
);
357 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
360 /* Constant-time check for list membership */
361 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
364 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
366 u16 seq
= seq_list
->head
;
367 u16 mask
= seq_list
->mask
;
369 seq_list
->head
= seq_list
->list
[seq
& mask
];
370 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
372 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
373 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
374 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
380 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
384 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
387 for (i
= 0; i
<= seq_list
->mask
; i
++)
388 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
390 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
391 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
394 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
396 u16 mask
= seq_list
->mask
;
398 /* All appends happen in constant time */
400 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
403 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
404 seq_list
->head
= seq
;
406 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
408 seq_list
->tail
= seq
;
409 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
412 static void l2cap_chan_timeout(struct work_struct
*work
)
414 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
416 struct l2cap_conn
*conn
= chan
->conn
;
419 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
421 mutex_lock(&conn
->chan_lock
);
422 l2cap_chan_lock(chan
);
424 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
425 reason
= ECONNREFUSED
;
426 else if (chan
->state
== BT_CONNECT
&&
427 chan
->sec_level
!= BT_SECURITY_SDP
)
428 reason
= ECONNREFUSED
;
432 l2cap_chan_close(chan
, reason
);
434 l2cap_chan_unlock(chan
);
436 chan
->ops
->close(chan
);
437 mutex_unlock(&conn
->chan_lock
);
439 l2cap_chan_put(chan
);
442 struct l2cap_chan
*l2cap_chan_create(void)
444 struct l2cap_chan
*chan
;
446 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
450 mutex_init(&chan
->lock
);
452 /* Set default lock nesting level */
453 atomic_set(&chan
->nesting
, L2CAP_NESTING_NORMAL
);
455 write_lock(&chan_list_lock
);
456 list_add(&chan
->global_l
, &chan_list
);
457 write_unlock(&chan_list_lock
);
459 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
461 chan
->state
= BT_OPEN
;
463 kref_init(&chan
->kref
);
465 /* This flag is cleared in l2cap_chan_ready() */
466 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
468 BT_DBG("chan %p", chan
);
472 EXPORT_SYMBOL_GPL(l2cap_chan_create
);
474 static void l2cap_chan_destroy(struct kref
*kref
)
476 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
478 BT_DBG("chan %p", chan
);
480 write_lock(&chan_list_lock
);
481 list_del(&chan
->global_l
);
482 write_unlock(&chan_list_lock
);
487 void l2cap_chan_hold(struct l2cap_chan
*c
)
489 BT_DBG("chan %p orig refcnt %d", c
, kref_read(&c
->kref
));
494 void l2cap_chan_put(struct l2cap_chan
*c
)
496 BT_DBG("chan %p orig refcnt %d", c
, kref_read(&c
->kref
));
498 kref_put(&c
->kref
, l2cap_chan_destroy
);
500 EXPORT_SYMBOL_GPL(l2cap_chan_put
);
502 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
504 chan
->fcs
= L2CAP_FCS_CRC16
;
505 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
506 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
507 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
508 chan
->remote_max_tx
= chan
->max_tx
;
509 chan
->remote_tx_win
= chan
->tx_win
;
510 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
511 chan
->sec_level
= BT_SECURITY_LOW
;
512 chan
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
513 chan
->retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
514 chan
->monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
515 chan
->conf_state
= 0;
517 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
519 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults
);
521 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
, u16 tx_credits
)
524 chan
->sdu_last_frag
= NULL
;
526 chan
->tx_credits
= tx_credits
;
527 /* Derive MPS from connection MTU to stop HCI fragmentation */
528 chan
->mps
= min_t(u16
, chan
->imtu
, chan
->conn
->mtu
- L2CAP_HDR_SIZE
);
529 /* Give enough credits for a full packet */
530 chan
->rx_credits
= (chan
->imtu
/ chan
->mps
) + 1;
532 skb_queue_head_init(&chan
->tx_q
);
535 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
537 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
538 __le16_to_cpu(chan
->psm
), chan
->dcid
);
540 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
544 switch (chan
->chan_type
) {
545 case L2CAP_CHAN_CONN_ORIENTED
:
546 /* Alloc CID for connection-oriented socket */
547 chan
->scid
= l2cap_alloc_cid(conn
);
548 if (conn
->hcon
->type
== ACL_LINK
)
549 chan
->omtu
= L2CAP_DEFAULT_MTU
;
552 case L2CAP_CHAN_CONN_LESS
:
553 /* Connectionless socket */
554 chan
->scid
= L2CAP_CID_CONN_LESS
;
555 chan
->dcid
= L2CAP_CID_CONN_LESS
;
556 chan
->omtu
= L2CAP_DEFAULT_MTU
;
559 case L2CAP_CHAN_FIXED
:
560 /* Caller will set CID and CID specific MTU values */
564 /* Raw socket can send/recv signalling messages only */
565 chan
->scid
= L2CAP_CID_SIGNALING
;
566 chan
->dcid
= L2CAP_CID_SIGNALING
;
567 chan
->omtu
= L2CAP_DEFAULT_MTU
;
570 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
571 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
572 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
573 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
574 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
575 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
577 l2cap_chan_hold(chan
);
579 /* Only keep a reference for fixed channels if they requested it */
580 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
581 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
582 hci_conn_hold(conn
->hcon
);
584 list_add(&chan
->list
, &conn
->chan_l
);
587 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
589 mutex_lock(&conn
->chan_lock
);
590 __l2cap_chan_add(conn
, chan
);
591 mutex_unlock(&conn
->chan_lock
);
594 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
596 struct l2cap_conn
*conn
= chan
->conn
;
598 __clear_chan_timer(chan
);
600 BT_DBG("chan %p, conn %p, err %d, state %s", chan
, conn
, err
,
601 state_to_string(chan
->state
));
603 chan
->ops
->teardown(chan
, err
);
606 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
607 /* Delete from channel list */
608 list_del(&chan
->list
);
610 l2cap_chan_put(chan
);
614 /* Reference was only held for non-fixed channels or
615 * fixed channels that explicitly requested it using the
616 * FLAG_HOLD_HCI_CONN flag.
618 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
619 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
620 hci_conn_drop(conn
->hcon
);
622 if (mgr
&& mgr
->bredr_chan
== chan
)
623 mgr
->bredr_chan
= NULL
;
626 if (chan
->hs_hchan
) {
627 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
629 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
630 amp_disconnect_logical_link(hs_hchan
);
633 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
637 case L2CAP_MODE_BASIC
:
640 case L2CAP_MODE_LE_FLOWCTL
:
641 skb_queue_purge(&chan
->tx_q
);
644 case L2CAP_MODE_ERTM
:
645 __clear_retrans_timer(chan
);
646 __clear_monitor_timer(chan
);
647 __clear_ack_timer(chan
);
649 skb_queue_purge(&chan
->srej_q
);
651 l2cap_seq_list_free(&chan
->srej_list
);
652 l2cap_seq_list_free(&chan
->retrans_list
);
656 case L2CAP_MODE_STREAMING
:
657 skb_queue_purge(&chan
->tx_q
);
663 EXPORT_SYMBOL_GPL(l2cap_chan_del
);
665 static void l2cap_conn_update_id_addr(struct work_struct
*work
)
667 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
668 id_addr_update_work
);
669 struct hci_conn
*hcon
= conn
->hcon
;
670 struct l2cap_chan
*chan
;
672 mutex_lock(&conn
->chan_lock
);
674 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
675 l2cap_chan_lock(chan
);
676 bacpy(&chan
->dst
, &hcon
->dst
);
677 chan
->dst_type
= bdaddr_dst_type(hcon
);
678 l2cap_chan_unlock(chan
);
681 mutex_unlock(&conn
->chan_lock
);
684 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
686 struct l2cap_conn
*conn
= chan
->conn
;
687 struct l2cap_le_conn_rsp rsp
;
690 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
691 result
= L2CAP_CR_LE_AUTHORIZATION
;
693 result
= L2CAP_CR_LE_BAD_PSM
;
695 l2cap_state_change(chan
, BT_DISCONN
);
697 rsp
.dcid
= cpu_to_le16(chan
->scid
);
698 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
699 rsp
.mps
= cpu_to_le16(chan
->mps
);
700 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
701 rsp
.result
= cpu_to_le16(result
);
703 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
707 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
709 struct l2cap_conn
*conn
= chan
->conn
;
710 struct l2cap_conn_rsp rsp
;
713 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
714 result
= L2CAP_CR_SEC_BLOCK
;
716 result
= L2CAP_CR_BAD_PSM
;
718 l2cap_state_change(chan
, BT_DISCONN
);
720 rsp
.scid
= cpu_to_le16(chan
->dcid
);
721 rsp
.dcid
= cpu_to_le16(chan
->scid
);
722 rsp
.result
= cpu_to_le16(result
);
723 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
725 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
728 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
730 struct l2cap_conn
*conn
= chan
->conn
;
732 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
734 switch (chan
->state
) {
736 chan
->ops
->teardown(chan
, 0);
741 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
742 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
743 l2cap_send_disconn_req(chan
, reason
);
745 l2cap_chan_del(chan
, reason
);
749 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
750 if (conn
->hcon
->type
== ACL_LINK
)
751 l2cap_chan_connect_reject(chan
);
752 else if (conn
->hcon
->type
== LE_LINK
)
753 l2cap_chan_le_connect_reject(chan
);
756 l2cap_chan_del(chan
, reason
);
761 l2cap_chan_del(chan
, reason
);
765 chan
->ops
->teardown(chan
, 0);
769 EXPORT_SYMBOL(l2cap_chan_close
);
771 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
773 switch (chan
->chan_type
) {
775 switch (chan
->sec_level
) {
776 case BT_SECURITY_HIGH
:
777 case BT_SECURITY_FIPS
:
778 return HCI_AT_DEDICATED_BONDING_MITM
;
779 case BT_SECURITY_MEDIUM
:
780 return HCI_AT_DEDICATED_BONDING
;
782 return HCI_AT_NO_BONDING
;
785 case L2CAP_CHAN_CONN_LESS
:
786 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_3DSP
)) {
787 if (chan
->sec_level
== BT_SECURITY_LOW
)
788 chan
->sec_level
= BT_SECURITY_SDP
;
790 if (chan
->sec_level
== BT_SECURITY_HIGH
||
791 chan
->sec_level
== BT_SECURITY_FIPS
)
792 return HCI_AT_NO_BONDING_MITM
;
794 return HCI_AT_NO_BONDING
;
796 case L2CAP_CHAN_CONN_ORIENTED
:
797 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_SDP
)) {
798 if (chan
->sec_level
== BT_SECURITY_LOW
)
799 chan
->sec_level
= BT_SECURITY_SDP
;
801 if (chan
->sec_level
== BT_SECURITY_HIGH
||
802 chan
->sec_level
== BT_SECURITY_FIPS
)
803 return HCI_AT_NO_BONDING_MITM
;
805 return HCI_AT_NO_BONDING
;
809 switch (chan
->sec_level
) {
810 case BT_SECURITY_HIGH
:
811 case BT_SECURITY_FIPS
:
812 return HCI_AT_GENERAL_BONDING_MITM
;
813 case BT_SECURITY_MEDIUM
:
814 return HCI_AT_GENERAL_BONDING
;
816 return HCI_AT_NO_BONDING
;
822 /* Service level security */
823 int l2cap_chan_check_security(struct l2cap_chan
*chan
, bool initiator
)
825 struct l2cap_conn
*conn
= chan
->conn
;
828 if (conn
->hcon
->type
== LE_LINK
)
829 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
831 auth_type
= l2cap_get_auth_type(chan
);
833 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
,
837 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
841 /* Get next available identificator.
842 * 1 - 128 are used by kernel.
843 * 129 - 199 are reserved.
844 * 200 - 254 are used by utilities like l2ping, etc.
847 mutex_lock(&conn
->ident_lock
);
849 if (++conn
->tx_ident
> 128)
854 mutex_unlock(&conn
->ident_lock
);
859 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
862 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
865 BT_DBG("code 0x%2.2x", code
);
870 /* Use NO_FLUSH if supported or we have an LE link (which does
871 * not support auto-flushing packets) */
872 if (lmp_no_flush_capable(conn
->hcon
->hdev
) ||
873 conn
->hcon
->type
== LE_LINK
)
874 flags
= ACL_START_NO_FLUSH
;
878 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
879 skb
->priority
= HCI_PRIO_MAX
;
881 hci_send_acl(conn
->hchan
, skb
, flags
);
884 static bool __chan_is_moving(struct l2cap_chan
*chan
)
886 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
887 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
890 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
892 struct hci_conn
*hcon
= chan
->conn
->hcon
;
895 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
898 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
900 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
907 /* Use NO_FLUSH for LE links (where this is the only option) or
908 * if the BR/EDR link supports it and flushing has not been
909 * explicitly requested (through FLAG_FLUSHABLE).
911 if (hcon
->type
== LE_LINK
||
912 (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
913 lmp_no_flush_capable(hcon
->hdev
)))
914 flags
= ACL_START_NO_FLUSH
;
918 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
919 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
922 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
924 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
925 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
927 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
930 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
931 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
938 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
939 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
946 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
948 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
949 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
951 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
954 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
955 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
962 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
963 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
970 static inline void __unpack_control(struct l2cap_chan
*chan
,
973 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
974 __unpack_extended_control(get_unaligned_le32(skb
->data
),
976 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
978 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
980 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
984 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
988 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
989 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
991 if (control
->sframe
) {
992 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
993 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
994 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
996 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
997 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
1003 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
1007 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1008 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
1010 if (control
->sframe
) {
1011 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
1012 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
1013 packed
|= L2CAP_CTRL_FRAME_TYPE
;
1015 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
1016 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1022 static inline void __pack_control(struct l2cap_chan
*chan
,
1023 struct l2cap_ctrl
*control
,
1024 struct sk_buff
*skb
)
1026 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1027 put_unaligned_le32(__pack_extended_control(control
),
1028 skb
->data
+ L2CAP_HDR_SIZE
);
1030 put_unaligned_le16(__pack_enhanced_control(control
),
1031 skb
->data
+ L2CAP_HDR_SIZE
);
1035 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
1037 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1038 return L2CAP_EXT_HDR_SIZE
;
1040 return L2CAP_ENH_HDR_SIZE
;
1043 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
1046 struct sk_buff
*skb
;
1047 struct l2cap_hdr
*lh
;
1048 int hlen
= __ertm_hdr_size(chan
);
1050 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1051 hlen
+= L2CAP_FCS_SIZE
;
1053 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1056 return ERR_PTR(-ENOMEM
);
1058 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
1059 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1060 lh
->cid
= cpu_to_le16(chan
->dcid
);
1062 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1063 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1065 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1067 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1068 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1069 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1072 skb
->priority
= HCI_PRIO_MAX
;
1076 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1077 struct l2cap_ctrl
*control
)
1079 struct sk_buff
*skb
;
1082 BT_DBG("chan %p, control %p", chan
, control
);
1084 if (!control
->sframe
)
1087 if (__chan_is_moving(chan
))
1090 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1094 if (control
->super
== L2CAP_SUPER_RR
)
1095 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1096 else if (control
->super
== L2CAP_SUPER_RNR
)
1097 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1099 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1100 chan
->last_acked_seq
= control
->reqseq
;
1101 __clear_ack_timer(chan
);
1104 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1105 control
->final
, control
->poll
, control
->super
);
1107 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1108 control_field
= __pack_extended_control(control
);
1110 control_field
= __pack_enhanced_control(control
);
1112 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1114 l2cap_do_send(chan
, skb
);
1117 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1119 struct l2cap_ctrl control
;
1121 BT_DBG("chan %p, poll %d", chan
, poll
);
1123 memset(&control
, 0, sizeof(control
));
1125 control
.poll
= poll
;
1127 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1128 control
.super
= L2CAP_SUPER_RNR
;
1130 control
.super
= L2CAP_SUPER_RR
;
1132 control
.reqseq
= chan
->buffer_seq
;
1133 l2cap_send_sframe(chan
, &control
);
1136 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1138 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
1141 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1144 static bool __amp_capable(struct l2cap_chan
*chan
)
1146 struct l2cap_conn
*conn
= chan
->conn
;
1147 struct hci_dev
*hdev
;
1148 bool amp_available
= false;
1150 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
1153 if (!(conn
->remote_fixed_chan
& L2CAP_FC_A2MP
))
1156 read_lock(&hci_dev_list_lock
);
1157 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1158 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1159 test_bit(HCI_UP
, &hdev
->flags
)) {
1160 amp_available
= true;
1164 read_unlock(&hci_dev_list_lock
);
1166 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1167 return amp_available
;
1172 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1174 /* Check EFS parameters */
1178 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1180 struct l2cap_conn
*conn
= chan
->conn
;
1181 struct l2cap_conn_req req
;
1183 req
.scid
= cpu_to_le16(chan
->scid
);
1184 req
.psm
= chan
->psm
;
1186 chan
->ident
= l2cap_get_ident(conn
);
1188 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1190 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1193 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1195 struct l2cap_create_chan_req req
;
1196 req
.scid
= cpu_to_le16(chan
->scid
);
1197 req
.psm
= chan
->psm
;
1198 req
.amp_id
= amp_id
;
1200 chan
->ident
= l2cap_get_ident(chan
->conn
);
1202 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1206 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1208 struct sk_buff
*skb
;
1210 BT_DBG("chan %p", chan
);
1212 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1215 __clear_retrans_timer(chan
);
1216 __clear_monitor_timer(chan
);
1217 __clear_ack_timer(chan
);
1219 chan
->retry_count
= 0;
1220 skb_queue_walk(&chan
->tx_q
, skb
) {
1221 if (bt_cb(skb
)->l2cap
.retries
)
1222 bt_cb(skb
)->l2cap
.retries
= 1;
1227 chan
->expected_tx_seq
= chan
->buffer_seq
;
1229 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1230 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1231 l2cap_seq_list_clear(&chan
->retrans_list
);
1232 l2cap_seq_list_clear(&chan
->srej_list
);
1233 skb_queue_purge(&chan
->srej_q
);
1235 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1236 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1238 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1241 static void l2cap_move_done(struct l2cap_chan
*chan
)
1243 u8 move_role
= chan
->move_role
;
1244 BT_DBG("chan %p", chan
);
1246 chan
->move_state
= L2CAP_MOVE_STABLE
;
1247 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1249 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1252 switch (move_role
) {
1253 case L2CAP_MOVE_ROLE_INITIATOR
:
1254 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1255 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1257 case L2CAP_MOVE_ROLE_RESPONDER
:
1258 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1263 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1265 /* The channel may have already been flagged as connected in
1266 * case of receiving data before the L2CAP info req/rsp
1267 * procedure is complete.
1269 if (chan
->state
== BT_CONNECTED
)
1272 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1273 chan
->conf_state
= 0;
1274 __clear_chan_timer(chan
);
1276 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1277 chan
->ops
->suspend(chan
);
1279 chan
->state
= BT_CONNECTED
;
1281 chan
->ops
->ready(chan
);
1284 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1286 struct l2cap_conn
*conn
= chan
->conn
;
1287 struct l2cap_le_conn_req req
;
1289 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1293 chan
->imtu
= chan
->conn
->mtu
;
1295 l2cap_le_flowctl_init(chan
, 0);
1297 req
.psm
= chan
->psm
;
1298 req
.scid
= cpu_to_le16(chan
->scid
);
1299 req
.mtu
= cpu_to_le16(chan
->imtu
);
1300 req
.mps
= cpu_to_le16(chan
->mps
);
1301 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1303 chan
->ident
= l2cap_get_ident(conn
);
1305 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1309 static void l2cap_le_start(struct l2cap_chan
*chan
)
1311 struct l2cap_conn
*conn
= chan
->conn
;
1313 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1317 l2cap_chan_ready(chan
);
1321 if (chan
->state
== BT_CONNECT
)
1322 l2cap_le_connect(chan
);
1325 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1327 if (__amp_capable(chan
)) {
1328 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1329 a2mp_discover_amp(chan
);
1330 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1331 l2cap_le_start(chan
);
1333 l2cap_send_conn_req(chan
);
1337 static void l2cap_request_info(struct l2cap_conn
*conn
)
1339 struct l2cap_info_req req
;
1341 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1344 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1346 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1347 conn
->info_ident
= l2cap_get_ident(conn
);
1349 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1351 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1355 static bool l2cap_check_enc_key_size(struct hci_conn
*hcon
)
1357 /* The minimum encryption key size needs to be enforced by the
1358 * host stack before establishing any L2CAP connections. The
1359 * specification in theory allows a minimum of 1, but to align
1360 * BR/EDR and LE transports, a minimum of 7 is chosen.
1362 * This check might also be called for unencrypted connections
1363 * that have no key size requirements. Ensure that the link is
1364 * actually encrypted before enforcing a key size.
1366 return (!test_bit(HCI_CONN_ENCRYPT
, &hcon
->flags
) ||
1367 hcon
->enc_key_size
>= hcon
->hdev
->min_enc_key_size
);
1370 static void l2cap_do_start(struct l2cap_chan
*chan
)
1372 struct l2cap_conn
*conn
= chan
->conn
;
1374 if (conn
->hcon
->type
== LE_LINK
) {
1375 l2cap_le_start(chan
);
1379 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)) {
1380 l2cap_request_info(conn
);
1384 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1387 if (!l2cap_chan_check_security(chan
, true) ||
1388 !__l2cap_no_conn_pending(chan
))
1391 if (l2cap_check_enc_key_size(conn
->hcon
))
1392 l2cap_start_connection(chan
);
1394 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
1397 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1399 u32 local_feat_mask
= l2cap_feat_mask
;
1401 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1404 case L2CAP_MODE_ERTM
:
1405 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1406 case L2CAP_MODE_STREAMING
:
1407 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1413 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1415 struct l2cap_conn
*conn
= chan
->conn
;
1416 struct l2cap_disconn_req req
;
1421 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1422 __clear_retrans_timer(chan
);
1423 __clear_monitor_timer(chan
);
1424 __clear_ack_timer(chan
);
1427 if (chan
->scid
== L2CAP_CID_A2MP
) {
1428 l2cap_state_change(chan
, BT_DISCONN
);
1432 req
.dcid
= cpu_to_le16(chan
->dcid
);
1433 req
.scid
= cpu_to_le16(chan
->scid
);
1434 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1437 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1440 /* ---- L2CAP connections ---- */
1441 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1443 struct l2cap_chan
*chan
, *tmp
;
1445 BT_DBG("conn %p", conn
);
1447 mutex_lock(&conn
->chan_lock
);
1449 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1450 l2cap_chan_lock(chan
);
1452 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1453 l2cap_chan_ready(chan
);
1454 l2cap_chan_unlock(chan
);
1458 if (chan
->state
== BT_CONNECT
) {
1459 if (!l2cap_chan_check_security(chan
, true) ||
1460 !__l2cap_no_conn_pending(chan
)) {
1461 l2cap_chan_unlock(chan
);
1465 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1466 && test_bit(CONF_STATE2_DEVICE
,
1467 &chan
->conf_state
)) {
1468 l2cap_chan_close(chan
, ECONNRESET
);
1469 l2cap_chan_unlock(chan
);
1473 if (l2cap_check_enc_key_size(conn
->hcon
))
1474 l2cap_start_connection(chan
);
1476 l2cap_chan_close(chan
, ECONNREFUSED
);
1478 } else if (chan
->state
== BT_CONNECT2
) {
1479 struct l2cap_conn_rsp rsp
;
1481 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1482 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1484 if (l2cap_chan_check_security(chan
, false)) {
1485 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1486 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1487 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1488 chan
->ops
->defer(chan
);
1491 l2cap_state_change(chan
, BT_CONFIG
);
1492 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1493 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1496 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1497 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1500 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1503 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1504 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1505 l2cap_chan_unlock(chan
);
1509 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1510 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1511 l2cap_build_conf_req(chan
, buf
, sizeof(buf
)), buf
);
1512 chan
->num_conf_req
++;
1515 l2cap_chan_unlock(chan
);
1518 mutex_unlock(&conn
->chan_lock
);
1521 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1523 struct hci_conn
*hcon
= conn
->hcon
;
1524 struct hci_dev
*hdev
= hcon
->hdev
;
1526 BT_DBG("%s conn %p", hdev
->name
, conn
);
1528 /* For outgoing pairing which doesn't necessarily have an
1529 * associated socket (e.g. mgmt_pair_device).
1532 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1534 /* For LE slave connections, make sure the connection interval
1535 * is in the range of the minium and maximum interval that has
1536 * been configured for this connection. If not, then trigger
1537 * the connection update procedure.
1539 if (hcon
->role
== HCI_ROLE_SLAVE
&&
1540 (hcon
->le_conn_interval
< hcon
->le_conn_min_interval
||
1541 hcon
->le_conn_interval
> hcon
->le_conn_max_interval
)) {
1542 struct l2cap_conn_param_update_req req
;
1544 req
.min
= cpu_to_le16(hcon
->le_conn_min_interval
);
1545 req
.max
= cpu_to_le16(hcon
->le_conn_max_interval
);
1546 req
.latency
= cpu_to_le16(hcon
->le_conn_latency
);
1547 req
.to_multiplier
= cpu_to_le16(hcon
->le_supv_timeout
);
1549 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1550 L2CAP_CONN_PARAM_UPDATE_REQ
, sizeof(req
), &req
);
1554 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1556 struct l2cap_chan
*chan
;
1557 struct hci_conn
*hcon
= conn
->hcon
;
1559 BT_DBG("conn %p", conn
);
1561 if (hcon
->type
== ACL_LINK
)
1562 l2cap_request_info(conn
);
1564 mutex_lock(&conn
->chan_lock
);
1566 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1568 l2cap_chan_lock(chan
);
1570 if (chan
->scid
== L2CAP_CID_A2MP
) {
1571 l2cap_chan_unlock(chan
);
1575 if (hcon
->type
== LE_LINK
) {
1576 l2cap_le_start(chan
);
1577 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1578 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
1579 l2cap_chan_ready(chan
);
1580 } else if (chan
->state
== BT_CONNECT
) {
1581 l2cap_do_start(chan
);
1584 l2cap_chan_unlock(chan
);
1587 mutex_unlock(&conn
->chan_lock
);
1589 if (hcon
->type
== LE_LINK
)
1590 l2cap_le_conn_ready(conn
);
1592 queue_work(hcon
->hdev
->workqueue
, &conn
->pending_rx_work
);
1595 /* Notify sockets that we cannot guaranty reliability anymore */
1596 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1598 struct l2cap_chan
*chan
;
1600 BT_DBG("conn %p", conn
);
1602 mutex_lock(&conn
->chan_lock
);
1604 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1605 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1606 l2cap_chan_set_err(chan
, err
);
1609 mutex_unlock(&conn
->chan_lock
);
1612 static void l2cap_info_timeout(struct work_struct
*work
)
1614 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1617 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1618 conn
->info_ident
= 0;
1620 l2cap_conn_start(conn
);
1625 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1626 * callback is called during registration. The ->remove callback is called
1627 * during unregistration.
1628 * An l2cap_user object can either be explicitly unregistered or when the
1629 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1630 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1631 * External modules must own a reference to the l2cap_conn object if they intend
1632 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1633 * any time if they don't.
1636 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1638 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1641 /* We need to check whether l2cap_conn is registered. If it is not, we
1642 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1643 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1644 * relies on the parent hci_conn object to be locked. This itself relies
1645 * on the hci_dev object to be locked. So we must lock the hci device
1650 if (!list_empty(&user
->list
)) {
1655 /* conn->hchan is NULL after l2cap_conn_del() was called */
1661 ret
= user
->probe(conn
, user
);
1665 list_add(&user
->list
, &conn
->users
);
1669 hci_dev_unlock(hdev
);
1672 EXPORT_SYMBOL(l2cap_register_user
);
1674 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1676 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1680 if (list_empty(&user
->list
))
1683 list_del_init(&user
->list
);
1684 user
->remove(conn
, user
);
1687 hci_dev_unlock(hdev
);
1689 EXPORT_SYMBOL(l2cap_unregister_user
);
1691 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1693 struct l2cap_user
*user
;
1695 while (!list_empty(&conn
->users
)) {
1696 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1697 list_del_init(&user
->list
);
1698 user
->remove(conn
, user
);
1702 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1704 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1705 struct l2cap_chan
*chan
, *l
;
1710 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1712 kfree_skb(conn
->rx_skb
);
1714 skb_queue_purge(&conn
->pending_rx
);
1716 /* We can not call flush_work(&conn->pending_rx_work) here since we
1717 * might block if we are running on a worker from the same workqueue
1718 * pending_rx_work is waiting on.
1720 if (work_pending(&conn
->pending_rx_work
))
1721 cancel_work_sync(&conn
->pending_rx_work
);
1723 if (work_pending(&conn
->id_addr_update_work
))
1724 cancel_work_sync(&conn
->id_addr_update_work
);
1726 l2cap_unregister_all_users(conn
);
1728 /* Force the connection to be immediately dropped */
1729 hcon
->disc_timeout
= 0;
1731 mutex_lock(&conn
->chan_lock
);
1734 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1735 l2cap_chan_hold(chan
);
1736 l2cap_chan_lock(chan
);
1738 l2cap_chan_del(chan
, err
);
1740 l2cap_chan_unlock(chan
);
1742 chan
->ops
->close(chan
);
1743 l2cap_chan_put(chan
);
1746 mutex_unlock(&conn
->chan_lock
);
1748 hci_chan_del(conn
->hchan
);
1750 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1751 cancel_delayed_work_sync(&conn
->info_timer
);
1753 hcon
->l2cap_data
= NULL
;
1755 l2cap_conn_put(conn
);
1758 static void l2cap_conn_free(struct kref
*ref
)
1760 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1762 hci_conn_put(conn
->hcon
);
1766 struct l2cap_conn
*l2cap_conn_get(struct l2cap_conn
*conn
)
1768 kref_get(&conn
->ref
);
1771 EXPORT_SYMBOL(l2cap_conn_get
);
1773 void l2cap_conn_put(struct l2cap_conn
*conn
)
1775 kref_put(&conn
->ref
, l2cap_conn_free
);
1777 EXPORT_SYMBOL(l2cap_conn_put
);
1779 /* ---- Socket interface ---- */
1781 /* Find socket with psm and source / destination bdaddr.
1782 * Returns closest match.
1784 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1789 struct l2cap_chan
*c
, *c1
= NULL
;
1791 read_lock(&chan_list_lock
);
1793 list_for_each_entry(c
, &chan_list
, global_l
) {
1794 if (state
&& c
->state
!= state
)
1797 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1800 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1803 if (c
->psm
== psm
) {
1804 int src_match
, dst_match
;
1805 int src_any
, dst_any
;
1808 src_match
= !bacmp(&c
->src
, src
);
1809 dst_match
= !bacmp(&c
->dst
, dst
);
1810 if (src_match
&& dst_match
) {
1812 read_unlock(&chan_list_lock
);
1817 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1818 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1819 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1820 (src_any
&& dst_any
))
1826 l2cap_chan_hold(c1
);
1828 read_unlock(&chan_list_lock
);
1833 static void l2cap_monitor_timeout(struct work_struct
*work
)
1835 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1836 monitor_timer
.work
);
1838 BT_DBG("chan %p", chan
);
1840 l2cap_chan_lock(chan
);
1843 l2cap_chan_unlock(chan
);
1844 l2cap_chan_put(chan
);
1848 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1850 l2cap_chan_unlock(chan
);
1851 l2cap_chan_put(chan
);
1854 static void l2cap_retrans_timeout(struct work_struct
*work
)
1856 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1857 retrans_timer
.work
);
1859 BT_DBG("chan %p", chan
);
1861 l2cap_chan_lock(chan
);
1864 l2cap_chan_unlock(chan
);
1865 l2cap_chan_put(chan
);
1869 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1870 l2cap_chan_unlock(chan
);
1871 l2cap_chan_put(chan
);
1874 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1875 struct sk_buff_head
*skbs
)
1877 struct sk_buff
*skb
;
1878 struct l2cap_ctrl
*control
;
1880 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1882 if (__chan_is_moving(chan
))
1885 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1887 while (!skb_queue_empty(&chan
->tx_q
)) {
1889 skb
= skb_dequeue(&chan
->tx_q
);
1891 bt_cb(skb
)->l2cap
.retries
= 1;
1892 control
= &bt_cb(skb
)->l2cap
;
1894 control
->reqseq
= 0;
1895 control
->txseq
= chan
->next_tx_seq
;
1897 __pack_control(chan
, control
, skb
);
1899 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1900 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1901 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1904 l2cap_do_send(chan
, skb
);
1906 BT_DBG("Sent txseq %u", control
->txseq
);
1908 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1909 chan
->frames_sent
++;
1913 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1915 struct sk_buff
*skb
, *tx_skb
;
1916 struct l2cap_ctrl
*control
;
1919 BT_DBG("chan %p", chan
);
1921 if (chan
->state
!= BT_CONNECTED
)
1924 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1927 if (__chan_is_moving(chan
))
1930 while (chan
->tx_send_head
&&
1931 chan
->unacked_frames
< chan
->remote_tx_win
&&
1932 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1934 skb
= chan
->tx_send_head
;
1936 bt_cb(skb
)->l2cap
.retries
= 1;
1937 control
= &bt_cb(skb
)->l2cap
;
1939 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1942 control
->reqseq
= chan
->buffer_seq
;
1943 chan
->last_acked_seq
= chan
->buffer_seq
;
1944 control
->txseq
= chan
->next_tx_seq
;
1946 __pack_control(chan
, control
, skb
);
1948 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1949 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1950 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1953 /* Clone after data has been modified. Data is assumed to be
1954 read-only (for locking purposes) on cloned sk_buffs.
1956 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1961 __set_retrans_timer(chan
);
1963 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1964 chan
->unacked_frames
++;
1965 chan
->frames_sent
++;
1968 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1969 chan
->tx_send_head
= NULL
;
1971 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1973 l2cap_do_send(chan
, tx_skb
);
1974 BT_DBG("Sent txseq %u", control
->txseq
);
1977 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1978 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1983 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1985 struct l2cap_ctrl control
;
1986 struct sk_buff
*skb
;
1987 struct sk_buff
*tx_skb
;
1990 BT_DBG("chan %p", chan
);
1992 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1995 if (__chan_is_moving(chan
))
1998 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1999 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
2001 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
2003 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2008 bt_cb(skb
)->l2cap
.retries
++;
2009 control
= bt_cb(skb
)->l2cap
;
2011 if (chan
->max_tx
!= 0 &&
2012 bt_cb(skb
)->l2cap
.retries
> chan
->max_tx
) {
2013 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
2014 l2cap_send_disconn_req(chan
, ECONNRESET
);
2015 l2cap_seq_list_clear(&chan
->retrans_list
);
2019 control
.reqseq
= chan
->buffer_seq
;
2020 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2025 if (skb_cloned(skb
)) {
2026 /* Cloned sk_buffs are read-only, so we need a
2029 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
2031 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2035 l2cap_seq_list_clear(&chan
->retrans_list
);
2039 /* Update skb contents */
2040 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2041 put_unaligned_le32(__pack_extended_control(&control
),
2042 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2044 put_unaligned_le16(__pack_enhanced_control(&control
),
2045 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2049 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2050 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
,
2051 tx_skb
->len
- L2CAP_FCS_SIZE
);
2052 put_unaligned_le16(fcs
, skb_tail_pointer(tx_skb
) -
2056 l2cap_do_send(chan
, tx_skb
);
2058 BT_DBG("Resent txseq %d", control
.txseq
);
2060 chan
->last_acked_seq
= chan
->buffer_seq
;
2064 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2065 struct l2cap_ctrl
*control
)
2067 BT_DBG("chan %p, control %p", chan
, control
);
2069 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2070 l2cap_ertm_resend(chan
);
2073 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2074 struct l2cap_ctrl
*control
)
2076 struct sk_buff
*skb
;
2078 BT_DBG("chan %p, control %p", chan
, control
);
2081 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2083 l2cap_seq_list_clear(&chan
->retrans_list
);
2085 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2088 if (chan
->unacked_frames
) {
2089 skb_queue_walk(&chan
->tx_q
, skb
) {
2090 if (bt_cb(skb
)->l2cap
.txseq
== control
->reqseq
||
2091 skb
== chan
->tx_send_head
)
2095 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2096 if (skb
== chan
->tx_send_head
)
2099 l2cap_seq_list_append(&chan
->retrans_list
,
2100 bt_cb(skb
)->l2cap
.txseq
);
2103 l2cap_ertm_resend(chan
);
2107 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2109 struct l2cap_ctrl control
;
2110 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2111 chan
->last_acked_seq
);
2114 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2115 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2117 memset(&control
, 0, sizeof(control
));
2120 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2121 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2122 __clear_ack_timer(chan
);
2123 control
.super
= L2CAP_SUPER_RNR
;
2124 control
.reqseq
= chan
->buffer_seq
;
2125 l2cap_send_sframe(chan
, &control
);
2127 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2128 l2cap_ertm_send(chan
);
2129 /* If any i-frames were sent, they included an ack */
2130 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2134 /* Ack now if the window is 3/4ths full.
2135 * Calculate without mul or div
2137 threshold
= chan
->ack_win
;
2138 threshold
+= threshold
<< 1;
2141 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2144 if (frames_to_ack
>= threshold
) {
2145 __clear_ack_timer(chan
);
2146 control
.super
= L2CAP_SUPER_RR
;
2147 control
.reqseq
= chan
->buffer_seq
;
2148 l2cap_send_sframe(chan
, &control
);
2153 __set_ack_timer(chan
);
2157 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2158 struct msghdr
*msg
, int len
,
2159 int count
, struct sk_buff
*skb
)
2161 struct l2cap_conn
*conn
= chan
->conn
;
2162 struct sk_buff
**frag
;
2165 if (!copy_from_iter_full(skb_put(skb
, count
), count
, &msg
->msg_iter
))
2171 /* Continuation fragments (no L2CAP header) */
2172 frag
= &skb_shinfo(skb
)->frag_list
;
2174 struct sk_buff
*tmp
;
2176 count
= min_t(unsigned int, conn
->mtu
, len
);
2178 tmp
= chan
->ops
->alloc_skb(chan
, 0, count
,
2179 msg
->msg_flags
& MSG_DONTWAIT
);
2181 return PTR_ERR(tmp
);
2185 if (!copy_from_iter_full(skb_put(*frag
, count
), count
,
2192 skb
->len
+= (*frag
)->len
;
2193 skb
->data_len
+= (*frag
)->len
;
2195 frag
= &(*frag
)->next
;
2201 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2202 struct msghdr
*msg
, size_t len
)
2204 struct l2cap_conn
*conn
= chan
->conn
;
2205 struct sk_buff
*skb
;
2206 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2207 struct l2cap_hdr
*lh
;
2209 BT_DBG("chan %p psm 0x%2.2x len %zu", chan
,
2210 __le16_to_cpu(chan
->psm
), len
);
2212 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2214 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2215 msg
->msg_flags
& MSG_DONTWAIT
);
2219 /* Create L2CAP header */
2220 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
2221 lh
->cid
= cpu_to_le16(chan
->dcid
);
2222 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2223 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2225 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2226 if (unlikely(err
< 0)) {
2228 return ERR_PTR(err
);
2233 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2234 struct msghdr
*msg
, size_t len
)
2236 struct l2cap_conn
*conn
= chan
->conn
;
2237 struct sk_buff
*skb
;
2239 struct l2cap_hdr
*lh
;
2241 BT_DBG("chan %p len %zu", chan
, len
);
2243 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2245 skb
= chan
->ops
->alloc_skb(chan
, L2CAP_HDR_SIZE
, count
,
2246 msg
->msg_flags
& MSG_DONTWAIT
);
2250 /* Create L2CAP header */
2251 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
2252 lh
->cid
= cpu_to_le16(chan
->dcid
);
2253 lh
->len
= cpu_to_le16(len
);
2255 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2256 if (unlikely(err
< 0)) {
2258 return ERR_PTR(err
);
2263 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2264 struct msghdr
*msg
, size_t len
,
2267 struct l2cap_conn
*conn
= chan
->conn
;
2268 struct sk_buff
*skb
;
2269 int err
, count
, hlen
;
2270 struct l2cap_hdr
*lh
;
2272 BT_DBG("chan %p len %zu", chan
, len
);
2275 return ERR_PTR(-ENOTCONN
);
2277 hlen
= __ertm_hdr_size(chan
);
2280 hlen
+= L2CAP_SDULEN_SIZE
;
2282 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2283 hlen
+= L2CAP_FCS_SIZE
;
2285 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2287 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2288 msg
->msg_flags
& MSG_DONTWAIT
);
2292 /* Create L2CAP header */
2293 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
2294 lh
->cid
= cpu_to_le16(chan
->dcid
);
2295 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2297 /* Control header is populated later */
2298 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2299 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2301 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2304 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2306 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2307 if (unlikely(err
< 0)) {
2309 return ERR_PTR(err
);
2312 bt_cb(skb
)->l2cap
.fcs
= chan
->fcs
;
2313 bt_cb(skb
)->l2cap
.retries
= 0;
2317 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2318 struct sk_buff_head
*seg_queue
,
2319 struct msghdr
*msg
, size_t len
)
2321 struct sk_buff
*skb
;
2326 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2328 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2329 * so fragmented skbs are not used. The HCI layer's handling
2330 * of fragmented skbs is not compatible with ERTM's queueing.
2333 /* PDU size is derived from the HCI MTU */
2334 pdu_len
= chan
->conn
->mtu
;
2336 /* Constrain PDU size for BR/EDR connections */
2338 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2340 /* Adjust for largest possible L2CAP overhead. */
2342 pdu_len
-= L2CAP_FCS_SIZE
;
2344 pdu_len
-= __ertm_hdr_size(chan
);
2346 /* Remote device may have requested smaller PDUs */
2347 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2349 if (len
<= pdu_len
) {
2350 sar
= L2CAP_SAR_UNSEGMENTED
;
2354 sar
= L2CAP_SAR_START
;
2359 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2362 __skb_queue_purge(seg_queue
);
2363 return PTR_ERR(skb
);
2366 bt_cb(skb
)->l2cap
.sar
= sar
;
2367 __skb_queue_tail(seg_queue
, skb
);
2373 if (len
<= pdu_len
) {
2374 sar
= L2CAP_SAR_END
;
2377 sar
= L2CAP_SAR_CONTINUE
;
2384 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2386 size_t len
, u16 sdulen
)
2388 struct l2cap_conn
*conn
= chan
->conn
;
2389 struct sk_buff
*skb
;
2390 int err
, count
, hlen
;
2391 struct l2cap_hdr
*lh
;
2393 BT_DBG("chan %p len %zu", chan
, len
);
2396 return ERR_PTR(-ENOTCONN
);
2398 hlen
= L2CAP_HDR_SIZE
;
2401 hlen
+= L2CAP_SDULEN_SIZE
;
2403 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2405 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2406 msg
->msg_flags
& MSG_DONTWAIT
);
2410 /* Create L2CAP header */
2411 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
2412 lh
->cid
= cpu_to_le16(chan
->dcid
);
2413 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2416 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2418 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2419 if (unlikely(err
< 0)) {
2421 return ERR_PTR(err
);
2427 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2428 struct sk_buff_head
*seg_queue
,
2429 struct msghdr
*msg
, size_t len
)
2431 struct sk_buff
*skb
;
2435 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2438 pdu_len
= chan
->remote_mps
- L2CAP_SDULEN_SIZE
;
2444 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2446 __skb_queue_purge(seg_queue
);
2447 return PTR_ERR(skb
);
2450 __skb_queue_tail(seg_queue
, skb
);
2456 pdu_len
+= L2CAP_SDULEN_SIZE
;
2463 static void l2cap_le_flowctl_send(struct l2cap_chan
*chan
)
2467 BT_DBG("chan %p", chan
);
2469 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2470 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2475 BT_DBG("Sent %d credits %u queued %u", sent
, chan
->tx_credits
,
2476 skb_queue_len(&chan
->tx_q
));
2479 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
2481 struct sk_buff
*skb
;
2483 struct sk_buff_head seg_queue
;
2488 /* Connectionless channel */
2489 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2490 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
2492 return PTR_ERR(skb
);
2494 /* Channel lock is released before requesting new skb and then
2495 * reacquired thus we need to recheck channel state.
2497 if (chan
->state
!= BT_CONNECTED
) {
2502 l2cap_do_send(chan
, skb
);
2506 switch (chan
->mode
) {
2507 case L2CAP_MODE_LE_FLOWCTL
:
2508 /* Check outgoing MTU */
2509 if (len
> chan
->omtu
)
2512 __skb_queue_head_init(&seg_queue
);
2514 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2516 if (chan
->state
!= BT_CONNECTED
) {
2517 __skb_queue_purge(&seg_queue
);
2524 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2526 l2cap_le_flowctl_send(chan
);
2528 if (!chan
->tx_credits
)
2529 chan
->ops
->suspend(chan
);
2535 case L2CAP_MODE_BASIC
:
2536 /* Check outgoing MTU */
2537 if (len
> chan
->omtu
)
2540 /* Create a basic PDU */
2541 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
2543 return PTR_ERR(skb
);
2545 /* Channel lock is released before requesting new skb and then
2546 * reacquired thus we need to recheck channel state.
2548 if (chan
->state
!= BT_CONNECTED
) {
2553 l2cap_do_send(chan
, skb
);
2557 case L2CAP_MODE_ERTM
:
2558 case L2CAP_MODE_STREAMING
:
2559 /* Check outgoing MTU */
2560 if (len
> chan
->omtu
) {
2565 __skb_queue_head_init(&seg_queue
);
2567 /* Do segmentation before calling in to the state machine,
2568 * since it's possible to block while waiting for memory
2571 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2573 /* The channel could have been closed while segmenting,
2574 * check that it is still connected.
2576 if (chan
->state
!= BT_CONNECTED
) {
2577 __skb_queue_purge(&seg_queue
);
2584 if (chan
->mode
== L2CAP_MODE_ERTM
)
2585 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2587 l2cap_streaming_send(chan
, &seg_queue
);
2591 /* If the skbs were not queued for sending, they'll still be in
2592 * seg_queue and need to be purged.
2594 __skb_queue_purge(&seg_queue
);
2598 BT_DBG("bad state %1.1x", chan
->mode
);
2604 EXPORT_SYMBOL_GPL(l2cap_chan_send
);
2606 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2608 struct l2cap_ctrl control
;
2611 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2613 memset(&control
, 0, sizeof(control
));
2615 control
.super
= L2CAP_SUPER_SREJ
;
2617 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2618 seq
= __next_seq(chan
, seq
)) {
2619 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2620 control
.reqseq
= seq
;
2621 l2cap_send_sframe(chan
, &control
);
2622 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2626 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2629 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2631 struct l2cap_ctrl control
;
2633 BT_DBG("chan %p", chan
);
2635 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2638 memset(&control
, 0, sizeof(control
));
2640 control
.super
= L2CAP_SUPER_SREJ
;
2641 control
.reqseq
= chan
->srej_list
.tail
;
2642 l2cap_send_sframe(chan
, &control
);
2645 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2647 struct l2cap_ctrl control
;
2651 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2653 memset(&control
, 0, sizeof(control
));
2655 control
.super
= L2CAP_SUPER_SREJ
;
2657 /* Capture initial list head to allow only one pass through the list. */
2658 initial_head
= chan
->srej_list
.head
;
2661 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2662 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2665 control
.reqseq
= seq
;
2666 l2cap_send_sframe(chan
, &control
);
2667 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2668 } while (chan
->srej_list
.head
!= initial_head
);
2671 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2673 struct sk_buff
*acked_skb
;
2676 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2678 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2681 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2682 chan
->expected_ack_seq
, chan
->unacked_frames
);
2684 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2685 ackseq
= __next_seq(chan
, ackseq
)) {
2687 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2689 skb_unlink(acked_skb
, &chan
->tx_q
);
2690 kfree_skb(acked_skb
);
2691 chan
->unacked_frames
--;
2695 chan
->expected_ack_seq
= reqseq
;
2697 if (chan
->unacked_frames
== 0)
2698 __clear_retrans_timer(chan
);
2700 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2703 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2705 BT_DBG("chan %p", chan
);
2707 chan
->expected_tx_seq
= chan
->buffer_seq
;
2708 l2cap_seq_list_clear(&chan
->srej_list
);
2709 skb_queue_purge(&chan
->srej_q
);
2710 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
/* ERTM transmit state machine, XMIT state: handle one TX event.
 * In this state new data may be sent immediately; poll-type events
 * move the channel to WAIT_F until the peer answers with F=1.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* First frame of the new batch becomes the send head if
		 * nothing is currently pending.
		 */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Tell the peer we are busy (RNR). */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We previously sent RNR; poll the peer with an
			 * RR(P=1) and wait for the final response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Acknowledge frames up to the peer's ReqSeq. */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2785 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2786 struct l2cap_ctrl
*control
,
2787 struct sk_buff_head
*skbs
, u8 event
)
2789 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2793 case L2CAP_EV_DATA_REQUEST
:
2794 if (chan
->tx_send_head
== NULL
)
2795 chan
->tx_send_head
= skb_peek(skbs
);
2796 /* Queue data, but don't send. */
2797 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2799 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2800 BT_DBG("Enter LOCAL_BUSY");
2801 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2803 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2804 /* The SREJ_SENT state must be aborted if we are to
2805 * enter the LOCAL_BUSY state.
2807 l2cap_abort_rx_srej_sent(chan
);
2810 l2cap_send_ack(chan
);
2813 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2814 BT_DBG("Exit LOCAL_BUSY");
2815 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2817 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2818 struct l2cap_ctrl local_control
;
2819 memset(&local_control
, 0, sizeof(local_control
));
2820 local_control
.sframe
= 1;
2821 local_control
.super
= L2CAP_SUPER_RR
;
2822 local_control
.poll
= 1;
2823 local_control
.reqseq
= chan
->buffer_seq
;
2824 l2cap_send_sframe(chan
, &local_control
);
2826 chan
->retry_count
= 1;
2827 __set_monitor_timer(chan
);
2828 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2831 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2832 l2cap_process_reqseq(chan
, control
->reqseq
);
2836 case L2CAP_EV_RECV_FBIT
:
2837 if (control
&& control
->final
) {
2838 __clear_monitor_timer(chan
);
2839 if (chan
->unacked_frames
> 0)
2840 __set_retrans_timer(chan
);
2841 chan
->retry_count
= 0;
2842 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2843 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2846 case L2CAP_EV_EXPLICIT_POLL
:
2849 case L2CAP_EV_MONITOR_TO
:
2850 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2851 l2cap_send_rr_or_rnr(chan
, 1);
2852 __set_monitor_timer(chan
);
2853 chan
->retry_count
++;
2855 l2cap_send_disconn_req(chan
, ECONNABORTED
);
/* Dispatch a transmit event to the handler for the channel's current
 * ERTM TX state; events in other states are ignored.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* Ignore event */
		break;
	}
}
/* Feed a received frame's ReqSeq and F bit into the TX state machine
 * (no data is queued; skbs is NULL).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
/* Feed only a received frame's F bit into the TX state machine
 * (used when ReqSeq must not be processed).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only raw (sniffer-style) channels get copies. */
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Clone so each channel owns its own skb; skip this
		 * channel on allocation failure.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv takes ownership on success; free on error. */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2924 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU (L2CAP header + command header + data),
 * fragmenting the payload over frag_list skbs when it exceeds the
 * connection MTU.  Returns NULL on allocation failure or if the MTU is
 * too small to hold even the headers.  Caller owns the returned skb.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment carries whatever fits after the headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and any fragments already chained to it. */
	kfree_skb(skb);
	return NULL;
}
/* Read one configuration option at *ptr, advance *ptr past it, and
 * return the total option length consumed.  Fixed-size values (1/2/4
 * bytes) are decoded little-endian into *val; larger values are returned
 * as a pointer to the in-place payload cast into *val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer, not a value. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option at *ptr and advance *ptr past it.
 * 'size' is the space remaining in the output buffer; the option is
 * silently dropped if it does not fit.  For len > 4, 'val' is treated
 * as a pointer to the payload to copy.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val,
			       size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Refuse to overflow the response buffer. */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters.  Only ERTM and streaming modes carry an EFS;
 * other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode is best-effort; latency and flush
		 * timeout are not meaningful here.
		 */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR acknowledgement.  Drops the
 * channel reference taken when the work was scheduled.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last ack we sent. */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Initialise per-channel ERTM state: sequence counters, queues, AMP move
 * state and the retransmit/monitor/ack work items.  Returns 0 on success
 * or a negative errno if a sequence list cannot be allocated.  For
 * non-ERTM modes only the counters and tx queue are set up.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Channels start on the BR/EDR controller; no AMP move pending. */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure of the second. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3154 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3157 case L2CAP_MODE_STREAMING
:
3158 case L2CAP_MODE_ERTM
:
3159 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3163 return L2CAP_MODE_BASIC
;
3167 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3169 return ((conn
->local_fixed_chan
& L2CAP_FC_A2MP
) &&
3170 (conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
));
3173 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3175 return ((conn
->local_fixed_chan
& L2CAP_FC_A2MP
) &&
3176 (conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
));
/* Fill in the RFC retransmission and monitor timeouts.  On an AMP link
 * they are derived from the controller's best-effort flush timeout;
 * otherwise the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range the RFC option can carry. */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
/* Configure the transmit window: switch to the extended control field
 * when a window larger than the default is requested and the peer
 * supports extended windows; otherwise clamp to the standard window.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	/* The ack window starts out equal to the tx window. */
	chan->ack_win = chan->tx_win;
}
/* Derive an incoming MTU from the largest EDR packet type NOT excluded
 * by the ACL's pkt_type mask.  In pkt_type the 2-DHx/3-DHx bits are
 * "packet type shall not be used" flags, so a clear bit means the packet
 * type is available; each check raises imtu to that packet's payload
 * capacity minus the 2-byte payload header.
 */
static void l2cap_mtu_auto(struct l2cap_chan *chan)
{
	struct hci_conn *conn = chan->conn->hcon;

	chan->imtu = L2CAP_DEFAULT_MIN_MTU;

	/* The 2-DH1 packet has between 2 and 56 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH1))
		chan->imtu = 54;

	/* The 3-DH1 packet has between 2 and 85 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH1))
		chan->imtu = 83;

	/* The 2-DH3 packet has between 2 and 369 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH3))
		chan->imtu = 367;

	/* The 3-DH3 packet has between 2 and 554 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH3))
		chan->imtu = 552;

	/* The 2-DH5 packet has between 2 and 681 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH5))
		chan->imtu = 679;

	/* The 3-DH5 packet has between 2 and 1023 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH5))
		chan->imtu = 1021;
}
/* Build an outgoing Configuration Request into 'data' (at most
 * 'data_size' bytes) for the channel's negotiated mode.  On the first
 * request the mode may still be downgraded based on the peer's feature
 * mask.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only advertise basic mode explicitly when the peer
		 * supports one of the reliable modes.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is bounded by the link MTU minus ERTM framing. */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
/* Parse the peer's buffered Configuration Request (chan->conf_req /
 * chan->conf_len) and build the Configuration Response into 'data'.
 * Returns the response length, or -ECONNREFUSED when the requested
 * configuration is fundamentally unacceptable.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		/* Hint options may be ignored; others must be understood. */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended windows require the A2MP fixed channel. */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Echo unknown non-hint option types back. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices cannot renegotiate the mode. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs,
						   endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what our link MTU
			 * can actually carry.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc,
					   endptr - ptr);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs,
						   endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
/* Parse a Configuration Response ('rsp', 'len' bytes) and build the
 * follow-up Configuration Request into 'data'.  *result may be updated
 * (e.g. to UNACCEPT for a too-small MTU).  Returns the request length
 * or -ECONNREFUSED when the response is unacceptable.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State-2 devices may not change mode mid-handshake. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel may not be upgraded by the peer. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3744 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3745 u16 result
, u16 flags
)
3747 struct l2cap_conf_rsp
*rsp
= data
;
3748 void *ptr
= rsp
->data
;
3750 BT_DBG("chan %p", chan
);
3752 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3753 rsp
->result
= cpu_to_le16(result
);
3754 rsp
->flags
= cpu_to_le16(flags
);
3759 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3761 struct l2cap_le_conn_rsp rsp
;
3762 struct l2cap_conn
*conn
= chan
->conn
;
3764 BT_DBG("chan %p", chan
);
3766 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3767 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3768 rsp
.mps
= cpu_to_le16(chan
->mps
);
3769 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3770 rsp
.result
= cpu_to_le16(L2CAP_CR_LE_SUCCESS
);
3772 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
/* Send the deferred BR/EDR connection response (success) and, if a
 * Configuration Request has not yet been sent for this channel, kick
 * off configuration as well.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP.
	 * NOTE(review): condition reconstructed — presumably chan->hs_hcon;
	 * confirm against the original source.
	 */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only the first caller proceeds to send the config request. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
/* Extract the RFC (and extended window) options from a final
 * Configuration Response and apply them to the channel.  Sane defaults
 * are used when the peer omitted an option.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only reliable modes carry RFC parameters worth applying. */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* Extended control uses the EWS value; otherwise the RFC
		 * window size bounds the ack window.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * information request, finish feature-mask discovery with defaults and
 * resume connection setup.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Match the reject against our pending info request by ident. */
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Handle an incoming Connection Request: locate a listening channel for
 * the PSM, validate security and the requested source CID, create the
 * new channel and send the response (possibly pending on security or
 * feature-mask discovery).  Returns the new channel or NULL.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace decides whether to accept. */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off feature-mask discovery before completing setup. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)),
			       buf);
		chan->num_conf_req++;
	}

	return chan;
}
/* Signalling handler for a Connection Request PDU: notify the
 * management interface of the connection (once) and delegate to
 * l2cap_connect() with the plain CONN_RSP response code.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* Report the device as connected to mgmt exactly once. */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
/* Handle a Connection Response / Create Channel Response: look the
 * channel up by source CID (or by ident while still pending), then
 * either proceed to configuration, keep waiting, or tear the channel
 * down on refusal.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* Pending responses carry scid 0; match on ident instead. */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Avoid sending a second Configuration Request. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)),
			       req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4121 static inline void set_default_fcs(struct l2cap_chan
*chan
)
4123 /* FCS is enabled only in ERTM or streaming mode, if one or both
4126 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
4127 chan
->fcs
= L2CAP_FCS_NONE
;
4128 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
4129 chan
->fcs
= L2CAP_FCS_CRC16
;
4132 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
4133 u8 ident
, u16 flags
)
4135 struct l2cap_conn
*conn
= chan
->conn
;
4137 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
4140 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
4141 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4143 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4144 l2cap_build_conf_rsp(chan
, data
,
4145 L2CAP_CONF_SUCCESS
, flags
), data
);
4148 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
4151 struct l2cap_cmd_rej_cid rej
;
4153 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4154 rej
.scid
= __cpu_to_le16(scid
);
4155 rej
.dcid
= __cpu_to_le16(dcid
);
4157 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4160 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4161 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4164 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4167 struct l2cap_chan
*chan
;
4170 if (cmd_len
< sizeof(*req
))
4173 dcid
= __le16_to_cpu(req
->dcid
);
4174 flags
= __le16_to_cpu(req
->flags
);
4176 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4178 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4180 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4184 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4185 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4190 /* Reject if config buffer is too small. */
4191 len
= cmd_len
- sizeof(*req
);
4192 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4193 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4194 l2cap_build_conf_rsp(chan
, rsp
,
4195 L2CAP_CONF_REJECT
, flags
), rsp
);
4200 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4201 chan
->conf_len
+= len
;
4203 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4204 /* Incomplete config. Send empty response. */
4205 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4206 l2cap_build_conf_rsp(chan
, rsp
,
4207 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4211 /* Complete config. */
4212 len
= l2cap_parse_conf_req(chan
, rsp
, sizeof(rsp
));
4214 l2cap_send_disconn_req(chan
, ECONNRESET
);
4218 chan
->ident
= cmd
->ident
;
4219 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4220 chan
->num_conf_rsp
++;
4222 /* Reset config buffer. */
4225 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4228 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4229 set_default_fcs(chan
);
4231 if (chan
->mode
== L2CAP_MODE_ERTM
||
4232 chan
->mode
== L2CAP_MODE_STREAMING
)
4233 err
= l2cap_ertm_init(chan
);
4236 l2cap_send_disconn_req(chan
, -err
);
4238 l2cap_chan_ready(chan
);
4243 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4245 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4246 l2cap_build_conf_req(chan
, buf
, sizeof(buf
)), buf
);
4247 chan
->num_conf_req
++;
4250 /* Got Conf Rsp PENDING from remote side and assume we sent
4251 Conf Rsp PENDING in the code above */
4252 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4253 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4255 /* check compatibility */
4257 /* Send rsp for BR/EDR channel */
4259 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4261 chan
->ident
= cmd
->ident
;
4265 l2cap_chan_unlock(chan
);
4269 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4270 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4273 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4274 u16 scid
, flags
, result
;
4275 struct l2cap_chan
*chan
;
4276 int len
= cmd_len
- sizeof(*rsp
);
4279 if (cmd_len
< sizeof(*rsp
))
4282 scid
= __le16_to_cpu(rsp
->scid
);
4283 flags
= __le16_to_cpu(rsp
->flags
);
4284 result
= __le16_to_cpu(rsp
->result
);
4286 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4289 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4294 case L2CAP_CONF_SUCCESS
:
4295 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4296 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4299 case L2CAP_CONF_PENDING
:
4300 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4302 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4305 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4306 buf
, sizeof(buf
), &result
);
4308 l2cap_send_disconn_req(chan
, ECONNRESET
);
4312 if (!chan
->hs_hcon
) {
4313 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4316 if (l2cap_check_efs(chan
)) {
4317 amp_create_logical_link(chan
);
4318 chan
->ident
= cmd
->ident
;
4324 case L2CAP_CONF_UNACCEPT
:
4325 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4328 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4329 l2cap_send_disconn_req(chan
, ECONNRESET
);
4333 /* throw out any old stored conf requests */
4334 result
= L2CAP_CONF_SUCCESS
;
4335 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4336 req
, sizeof(req
), &result
);
4338 l2cap_send_disconn_req(chan
, ECONNRESET
);
4342 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4343 L2CAP_CONF_REQ
, len
, req
);
4344 chan
->num_conf_req
++;
4345 if (result
!= L2CAP_CONF_SUCCESS
)
4352 l2cap_chan_set_err(chan
, ECONNRESET
);
4354 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4355 l2cap_send_disconn_req(chan
, ECONNRESET
);
4359 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4362 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4364 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4365 set_default_fcs(chan
);
4367 if (chan
->mode
== L2CAP_MODE_ERTM
||
4368 chan
->mode
== L2CAP_MODE_STREAMING
)
4369 err
= l2cap_ertm_init(chan
);
4372 l2cap_send_disconn_req(chan
, -err
);
4374 l2cap_chan_ready(chan
);
4378 l2cap_chan_unlock(chan
);
4382 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4383 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4386 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4387 struct l2cap_disconn_rsp rsp
;
4389 struct l2cap_chan
*chan
;
4391 if (cmd_len
!= sizeof(*req
))
4394 scid
= __le16_to_cpu(req
->scid
);
4395 dcid
= __le16_to_cpu(req
->dcid
);
4397 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4399 mutex_lock(&conn
->chan_lock
);
4401 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4403 mutex_unlock(&conn
->chan_lock
);
4404 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4408 l2cap_chan_lock(chan
);
4410 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4411 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4412 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4414 chan
->ops
->set_shutdown(chan
);
4416 l2cap_chan_hold(chan
);
4417 l2cap_chan_del(chan
, ECONNRESET
);
4419 l2cap_chan_unlock(chan
);
4421 chan
->ops
->close(chan
);
4422 l2cap_chan_put(chan
);
4424 mutex_unlock(&conn
->chan_lock
);
4429 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4430 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4433 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4435 struct l2cap_chan
*chan
;
4437 if (cmd_len
!= sizeof(*rsp
))
4440 scid
= __le16_to_cpu(rsp
->scid
);
4441 dcid
= __le16_to_cpu(rsp
->dcid
);
4443 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4445 mutex_lock(&conn
->chan_lock
);
4447 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4449 mutex_unlock(&conn
->chan_lock
);
4453 l2cap_chan_lock(chan
);
4455 if (chan
->state
!= BT_DISCONN
) {
4456 l2cap_chan_unlock(chan
);
4457 mutex_unlock(&conn
->chan_lock
);
4461 l2cap_chan_hold(chan
);
4462 l2cap_chan_del(chan
, 0);
4464 l2cap_chan_unlock(chan
);
4466 chan
->ops
->close(chan
);
4467 l2cap_chan_put(chan
);
4469 mutex_unlock(&conn
->chan_lock
);
4474 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4475 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4478 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4481 if (cmd_len
!= sizeof(*req
))
4484 type
= __le16_to_cpu(req
->type
);
4486 BT_DBG("type 0x%4.4x", type
);
4488 if (type
== L2CAP_IT_FEAT_MASK
) {
4490 u32 feat_mask
= l2cap_feat_mask
;
4491 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4492 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4493 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4495 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4497 if (conn
->local_fixed_chan
& L2CAP_FC_A2MP
)
4498 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4499 | L2CAP_FEAT_EXT_WINDOW
;
4501 put_unaligned_le32(feat_mask
, rsp
->data
);
4502 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4504 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4506 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4508 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4509 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4510 rsp
->data
[0] = conn
->local_fixed_chan
;
4511 memset(rsp
->data
+ 1, 0, 7);
4512 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4515 struct l2cap_info_rsp rsp
;
4516 rsp
.type
= cpu_to_le16(type
);
4517 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4518 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4525 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4526 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4529 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4532 if (cmd_len
< sizeof(*rsp
))
4535 type
= __le16_to_cpu(rsp
->type
);
4536 result
= __le16_to_cpu(rsp
->result
);
4538 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4540 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4541 if (cmd
->ident
!= conn
->info_ident
||
4542 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4545 cancel_delayed_work(&conn
->info_timer
);
4547 if (result
!= L2CAP_IR_SUCCESS
) {
4548 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4549 conn
->info_ident
= 0;
4551 l2cap_conn_start(conn
);
4557 case L2CAP_IT_FEAT_MASK
:
4558 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4560 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4561 struct l2cap_info_req req
;
4562 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4564 conn
->info_ident
= l2cap_get_ident(conn
);
4566 l2cap_send_cmd(conn
, conn
->info_ident
,
4567 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4569 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4570 conn
->info_ident
= 0;
4572 l2cap_conn_start(conn
);
4576 case L2CAP_IT_FIXED_CHAN
:
4577 conn
->remote_fixed_chan
= rsp
->data
[0];
4578 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4579 conn
->info_ident
= 0;
4581 l2cap_conn_start(conn
);
4588 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4589 struct l2cap_cmd_hdr
*cmd
,
4590 u16 cmd_len
, void *data
)
4592 struct l2cap_create_chan_req
*req
= data
;
4593 struct l2cap_create_chan_rsp rsp
;
4594 struct l2cap_chan
*chan
;
4595 struct hci_dev
*hdev
;
4598 if (cmd_len
!= sizeof(*req
))
4601 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
4604 psm
= le16_to_cpu(req
->psm
);
4605 scid
= le16_to_cpu(req
->scid
);
4607 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4609 /* For controller id 0 make BR/EDR connection */
4610 if (req
->amp_id
== AMP_ID_BREDR
) {
4611 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4616 /* Validate AMP controller id */
4617 hdev
= hci_dev_get(req
->amp_id
);
4621 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4626 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4629 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4630 struct hci_conn
*hs_hcon
;
4632 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4636 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4641 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4643 mgr
->bredr_chan
= chan
;
4644 chan
->hs_hcon
= hs_hcon
;
4645 chan
->fcs
= L2CAP_FCS_NONE
;
4646 conn
->mtu
= hdev
->block_mtu
;
4655 rsp
.scid
= cpu_to_le16(scid
);
4656 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4657 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4659 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4665 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4667 struct l2cap_move_chan_req req
;
4670 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4672 ident
= l2cap_get_ident(chan
->conn
);
4673 chan
->ident
= ident
;
4675 req
.icid
= cpu_to_le16(chan
->scid
);
4676 req
.dest_amp_id
= dest_amp_id
;
4678 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4681 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4684 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4686 struct l2cap_move_chan_rsp rsp
;
4688 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4690 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4691 rsp
.result
= cpu_to_le16(result
);
4693 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4697 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4699 struct l2cap_move_chan_cfm cfm
;
4701 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4703 chan
->ident
= l2cap_get_ident(chan
->conn
);
4705 cfm
.icid
= cpu_to_le16(chan
->scid
);
4706 cfm
.result
= cpu_to_le16(result
);
4708 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4711 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4714 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4716 struct l2cap_move_chan_cfm cfm
;
4718 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4720 cfm
.icid
= cpu_to_le16(icid
);
4721 cfm
.result
= cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4723 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4727 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4730 struct l2cap_move_chan_cfm_rsp rsp
;
4732 BT_DBG("icid 0x%4.4x", icid
);
4734 rsp
.icid
= cpu_to_le16(icid
);
4735 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4738 static void __release_logical_link(struct l2cap_chan
*chan
)
4740 chan
->hs_hchan
= NULL
;
4741 chan
->hs_hcon
= NULL
;
4743 /* Placeholder - release the logical link */
4746 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4748 /* Logical link setup failed */
4749 if (chan
->state
!= BT_CONNECTED
) {
4750 /* Create channel failure, disconnect */
4751 l2cap_send_disconn_req(chan
, ECONNRESET
);
4755 switch (chan
->move_role
) {
4756 case L2CAP_MOVE_ROLE_RESPONDER
:
4757 l2cap_move_done(chan
);
4758 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4760 case L2CAP_MOVE_ROLE_INITIATOR
:
4761 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4762 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4763 /* Remote has only sent pending or
4764 * success responses, clean up
4766 l2cap_move_done(chan
);
4769 /* Other amp move states imply that the move
4770 * has already aborted
4772 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4777 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4778 struct hci_chan
*hchan
)
4780 struct l2cap_conf_rsp rsp
;
4782 chan
->hs_hchan
= hchan
;
4783 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4785 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4787 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4790 set_default_fcs(chan
);
4792 err
= l2cap_ertm_init(chan
);
4794 l2cap_send_disconn_req(chan
, -err
);
4796 l2cap_chan_ready(chan
);
4800 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4801 struct hci_chan
*hchan
)
4803 chan
->hs_hcon
= hchan
->conn
;
4804 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4806 BT_DBG("move_state %d", chan
->move_state
);
4808 switch (chan
->move_state
) {
4809 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4810 /* Move confirm will be sent after a success
4811 * response is received
4813 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4815 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4816 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4817 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4818 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4819 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4820 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4821 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4822 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4823 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4827 /* Move was not in expected state, free the channel */
4828 __release_logical_link(chan
);
4830 chan
->move_state
= L2CAP_MOVE_STABLE
;
4834 /* Call with chan locked */
4835 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4838 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4841 l2cap_logical_fail(chan
);
4842 __release_logical_link(chan
);
4846 if (chan
->state
!= BT_CONNECTED
) {
4847 /* Ignore logical link if channel is on BR/EDR */
4848 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4849 l2cap_logical_finish_create(chan
, hchan
);
4851 l2cap_logical_finish_move(chan
, hchan
);
4855 void l2cap_move_start(struct l2cap_chan
*chan
)
4857 BT_DBG("chan %p", chan
);
4859 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4860 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4862 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4863 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4864 /* Placeholder - start physical link setup */
4866 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4867 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4869 l2cap_move_setup(chan
);
4870 l2cap_send_move_chan_req(chan
, 0);
4874 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4875 u8 local_amp_id
, u8 remote_amp_id
)
4877 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4878 local_amp_id
, remote_amp_id
);
4880 chan
->fcs
= L2CAP_FCS_NONE
;
4882 /* Outgoing channel on AMP */
4883 if (chan
->state
== BT_CONNECT
) {
4884 if (result
== L2CAP_CR_SUCCESS
) {
4885 chan
->local_amp_id
= local_amp_id
;
4886 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4888 /* Revert to BR/EDR connect */
4889 l2cap_send_conn_req(chan
);
4895 /* Incoming channel on AMP */
4896 if (__l2cap_no_conn_pending(chan
)) {
4897 struct l2cap_conn_rsp rsp
;
4899 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4900 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4902 if (result
== L2CAP_CR_SUCCESS
) {
4903 /* Send successful response */
4904 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4905 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4907 /* Send negative response */
4908 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4909 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4912 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4915 if (result
== L2CAP_CR_SUCCESS
) {
4916 l2cap_state_change(chan
, BT_CONFIG
);
4917 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4918 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4920 l2cap_build_conf_req(chan
, buf
, sizeof(buf
)), buf
);
4921 chan
->num_conf_req
++;
4926 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4929 l2cap_move_setup(chan
);
4930 chan
->move_id
= local_amp_id
;
4931 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4933 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4936 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4938 struct hci_chan
*hchan
= NULL
;
4940 /* Placeholder - get hci_chan for logical link */
4943 if (hchan
->state
== BT_CONNECTED
) {
4944 /* Logical link is ready to go */
4945 chan
->hs_hcon
= hchan
->conn
;
4946 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4947 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4948 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4950 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4952 /* Wait for logical link to be ready */
4953 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4956 /* Logical link not available */
4957 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4961 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4963 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4965 if (result
== -EINVAL
)
4966 rsp_result
= L2CAP_MR_BAD_ID
;
4968 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4970 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4973 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4974 chan
->move_state
= L2CAP_MOVE_STABLE
;
4976 /* Restart data transmission */
4977 l2cap_ertm_send(chan
);
4980 /* Invoke with locked chan */
4981 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4983 u8 local_amp_id
= chan
->local_amp_id
;
4984 u8 remote_amp_id
= chan
->remote_amp_id
;
4986 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4987 chan
, result
, local_amp_id
, remote_amp_id
);
4989 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
)
4992 if (chan
->state
!= BT_CONNECTED
) {
4993 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4994 } else if (result
!= L2CAP_MR_SUCCESS
) {
4995 l2cap_do_move_cancel(chan
, result
);
4997 switch (chan
->move_role
) {
4998 case L2CAP_MOVE_ROLE_INITIATOR
:
4999 l2cap_do_move_initiate(chan
, local_amp_id
,
5002 case L2CAP_MOVE_ROLE_RESPONDER
:
5003 l2cap_do_move_respond(chan
, result
);
5006 l2cap_do_move_cancel(chan
, result
);
5012 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
5013 struct l2cap_cmd_hdr
*cmd
,
5014 u16 cmd_len
, void *data
)
5016 struct l2cap_move_chan_req
*req
= data
;
5017 struct l2cap_move_chan_rsp rsp
;
5018 struct l2cap_chan
*chan
;
5020 u16 result
= L2CAP_MR_NOT_ALLOWED
;
5022 if (cmd_len
!= sizeof(*req
))
5025 icid
= le16_to_cpu(req
->icid
);
5027 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
5029 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
5032 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5034 rsp
.icid
= cpu_to_le16(icid
);
5035 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
5036 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
5041 chan
->ident
= cmd
->ident
;
5043 if (chan
->scid
< L2CAP_CID_DYN_START
||
5044 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
5045 (chan
->mode
!= L2CAP_MODE_ERTM
&&
5046 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
5047 result
= L2CAP_MR_NOT_ALLOWED
;
5048 goto send_move_response
;
5051 if (chan
->local_amp_id
== req
->dest_amp_id
) {
5052 result
= L2CAP_MR_SAME_ID
;
5053 goto send_move_response
;
5056 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
5057 struct hci_dev
*hdev
;
5058 hdev
= hci_dev_get(req
->dest_amp_id
);
5059 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
5060 !test_bit(HCI_UP
, &hdev
->flags
)) {
5064 result
= L2CAP_MR_BAD_ID
;
5065 goto send_move_response
;
5070 /* Detect a move collision. Only send a collision response
5071 * if this side has "lost", otherwise proceed with the move.
5072 * The winner has the larger bd_addr.
5074 if ((__chan_is_moving(chan
) ||
5075 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
5076 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
5077 result
= L2CAP_MR_COLLISION
;
5078 goto send_move_response
;
5081 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5082 l2cap_move_setup(chan
);
5083 chan
->move_id
= req
->dest_amp_id
;
5085 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
5086 /* Moving to BR/EDR */
5087 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5088 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5089 result
= L2CAP_MR_PEND
;
5091 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
5092 result
= L2CAP_MR_SUCCESS
;
5095 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
5096 /* Placeholder - uncomment when amp functions are available */
5097 /*amp_accept_physical(chan, req->dest_amp_id);*/
5098 result
= L2CAP_MR_PEND
;
5102 l2cap_send_move_chan_rsp(chan
, result
);
5104 l2cap_chan_unlock(chan
);
5109 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
5111 struct l2cap_chan
*chan
;
5112 struct hci_chan
*hchan
= NULL
;
5114 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5116 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5120 __clear_chan_timer(chan
);
5121 if (result
== L2CAP_MR_PEND
)
5122 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
5124 switch (chan
->move_state
) {
5125 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
5126 /* Move confirm will be sent when logical link
5129 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5131 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
5132 if (result
== L2CAP_MR_PEND
) {
5134 } else if (test_bit(CONN_LOCAL_BUSY
,
5135 &chan
->conn_state
)) {
5136 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5138 /* Logical link is up or moving to BR/EDR,
5141 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
5142 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5145 case L2CAP_MOVE_WAIT_RSP
:
5147 if (result
== L2CAP_MR_SUCCESS
) {
5148 /* Remote is ready, send confirm immediately
5149 * after logical link is ready
5151 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5153 /* Both logical link and move success
5154 * are required to confirm
5156 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5159 /* Placeholder - get hci_chan for logical link */
5161 /* Logical link not available */
5162 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5166 /* If the logical link is not yet connected, do not
5167 * send confirmation.
5169 if (hchan
->state
!= BT_CONNECTED
)
5172 /* Logical link is already ready to go */
5174 chan
->hs_hcon
= hchan
->conn
;
5175 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5177 if (result
== L2CAP_MR_SUCCESS
) {
5178 /* Can confirm now */
5179 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5181 /* Now only need move success
5184 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5187 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5190 /* Any other amp move state means the move failed. */
5191 chan
->move_id
= chan
->local_amp_id
;
5192 l2cap_move_done(chan
);
5193 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5196 l2cap_chan_unlock(chan
);
5199 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5202 struct l2cap_chan
*chan
;
5204 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5206 /* Could not locate channel, icid is best guess */
5207 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5211 __clear_chan_timer(chan
);
5213 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5214 if (result
== L2CAP_MR_COLLISION
) {
5215 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5217 /* Cleanup - cancel move */
5218 chan
->move_id
= chan
->local_amp_id
;
5219 l2cap_move_done(chan
);
5223 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5225 l2cap_chan_unlock(chan
);
5228 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5229 struct l2cap_cmd_hdr
*cmd
,
5230 u16 cmd_len
, void *data
)
5232 struct l2cap_move_chan_rsp
*rsp
= data
;
5235 if (cmd_len
!= sizeof(*rsp
))
5238 icid
= le16_to_cpu(rsp
->icid
);
5239 result
= le16_to_cpu(rsp
->result
);
5241 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5243 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5244 l2cap_move_continue(conn
, icid
, result
);
5246 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5251 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5252 struct l2cap_cmd_hdr
*cmd
,
5253 u16 cmd_len
, void *data
)
5255 struct l2cap_move_chan_cfm
*cfm
= data
;
5256 struct l2cap_chan
*chan
;
5259 if (cmd_len
!= sizeof(*cfm
))
5262 icid
= le16_to_cpu(cfm
->icid
);
5263 result
= le16_to_cpu(cfm
->result
);
5265 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5267 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5269 /* Spec requires a response even if the icid was not found */
5270 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5274 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5275 if (result
== L2CAP_MC_CONFIRMED
) {
5276 chan
->local_amp_id
= chan
->move_id
;
5277 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5278 __release_logical_link(chan
);
5280 chan
->move_id
= chan
->local_amp_id
;
5283 l2cap_move_done(chan
);
5286 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5288 l2cap_chan_unlock(chan
);
5293 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5294 struct l2cap_cmd_hdr
*cmd
,
5295 u16 cmd_len
, void *data
)
5297 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5298 struct l2cap_chan
*chan
;
5301 if (cmd_len
!= sizeof(*rsp
))
5304 icid
= le16_to_cpu(rsp
->icid
);
5306 BT_DBG("icid 0x%4.4x", icid
);
5308 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5312 __clear_chan_timer(chan
);
5314 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5315 chan
->local_amp_id
= chan
->move_id
;
5317 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5318 __release_logical_link(chan
);
5320 l2cap_move_done(chan
);
5323 l2cap_chan_unlock(chan
);
5328 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5329 struct l2cap_cmd_hdr
*cmd
,
5330 u16 cmd_len
, u8
*data
)
5332 struct hci_conn
*hcon
= conn
->hcon
;
5333 struct l2cap_conn_param_update_req
*req
;
5334 struct l2cap_conn_param_update_rsp rsp
;
5335 u16 min
, max
, latency
, to_multiplier
;
5338 if (hcon
->role
!= HCI_ROLE_MASTER
)
5341 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5344 req
= (struct l2cap_conn_param_update_req
*) data
;
5345 min
= __le16_to_cpu(req
->min
);
5346 max
= __le16_to_cpu(req
->max
);
5347 latency
= __le16_to_cpu(req
->latency
);
5348 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5350 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5351 min
, max
, latency
, to_multiplier
);
5353 memset(&rsp
, 0, sizeof(rsp
));
5355 err
= hci_check_conn_params(min
, max
, latency
, to_multiplier
);
5357 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5359 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5361 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5367 store_hint
= hci_le_conn_update(hcon
, min
, max
, latency
,
5369 mgmt_new_conn_param(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
,
5370 store_hint
, min
, max
, latency
,
5378 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5379 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5382 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5383 struct hci_conn
*hcon
= conn
->hcon
;
5384 u16 dcid
, mtu
, mps
, credits
, result
;
5385 struct l2cap_chan
*chan
;
5388 if (cmd_len
< sizeof(*rsp
))
5391 dcid
= __le16_to_cpu(rsp
->dcid
);
5392 mtu
= __le16_to_cpu(rsp
->mtu
);
5393 mps
= __le16_to_cpu(rsp
->mps
);
5394 credits
= __le16_to_cpu(rsp
->credits
);
5395 result
= __le16_to_cpu(rsp
->result
);
5397 if (result
== L2CAP_CR_LE_SUCCESS
&& (mtu
< 23 || mps
< 23 ||
5398 dcid
< L2CAP_CID_DYN_START
||
5399 dcid
> L2CAP_CID_LE_DYN_END
))
5402 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5403 dcid
, mtu
, mps
, credits
, result
);
5405 mutex_lock(&conn
->chan_lock
);
5407 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5415 l2cap_chan_lock(chan
);
5418 case L2CAP_CR_LE_SUCCESS
:
5419 if (__l2cap_get_chan_by_dcid(conn
, dcid
)) {
5427 chan
->remote_mps
= mps
;
5428 chan
->tx_credits
= credits
;
5429 l2cap_chan_ready(chan
);
5432 case L2CAP_CR_LE_AUTHENTICATION
:
5433 case L2CAP_CR_LE_ENCRYPTION
:
5434 /* If we already have MITM protection we can't do
5437 if (hcon
->sec_level
> BT_SECURITY_MEDIUM
) {
5438 l2cap_chan_del(chan
, ECONNREFUSED
);
5442 sec_level
= hcon
->sec_level
+ 1;
5443 if (chan
->sec_level
< sec_level
)
5444 chan
->sec_level
= sec_level
;
5446 /* We'll need to send a new Connect Request */
5447 clear_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
);
5449 smp_conn_security(hcon
, chan
->sec_level
);
5453 l2cap_chan_del(chan
, ECONNREFUSED
);
5457 l2cap_chan_unlock(chan
);
5460 mutex_unlock(&conn
->chan_lock
);
5465 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5466 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5471 switch (cmd
->code
) {
5472 case L2CAP_COMMAND_REJ
:
5473 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5476 case L2CAP_CONN_REQ
:
5477 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5480 case L2CAP_CONN_RSP
:
5481 case L2CAP_CREATE_CHAN_RSP
:
5482 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5485 case L2CAP_CONF_REQ
:
5486 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5489 case L2CAP_CONF_RSP
:
5490 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5493 case L2CAP_DISCONN_REQ
:
5494 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5497 case L2CAP_DISCONN_RSP
:
5498 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5501 case L2CAP_ECHO_REQ
:
5502 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5505 case L2CAP_ECHO_RSP
:
5508 case L2CAP_INFO_REQ
:
5509 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5512 case L2CAP_INFO_RSP
:
5513 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5516 case L2CAP_CREATE_CHAN_REQ
:
5517 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5520 case L2CAP_MOVE_CHAN_REQ
:
5521 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5524 case L2CAP_MOVE_CHAN_RSP
:
5525 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5528 case L2CAP_MOVE_CHAN_CFM
:
5529 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5532 case L2CAP_MOVE_CHAN_CFM_RSP
:
5533 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5537 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5545 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5546 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5549 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5550 struct l2cap_le_conn_rsp rsp
;
5551 struct l2cap_chan
*chan
, *pchan
;
5552 u16 dcid
, scid
, credits
, mtu
, mps
;
5556 if (cmd_len
!= sizeof(*req
))
5559 scid
= __le16_to_cpu(req
->scid
);
5560 mtu
= __le16_to_cpu(req
->mtu
);
5561 mps
= __le16_to_cpu(req
->mps
);
5566 if (mtu
< 23 || mps
< 23)
5569 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5572 /* Check if we have socket listening on psm */
5573 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5574 &conn
->hcon
->dst
, LE_LINK
);
5576 result
= L2CAP_CR_LE_BAD_PSM
;
5581 mutex_lock(&conn
->chan_lock
);
5582 l2cap_chan_lock(pchan
);
5584 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
,
5586 result
= L2CAP_CR_LE_AUTHENTICATION
;
5588 goto response_unlock
;
5591 /* Check for valid dynamic CID range */
5592 if (scid
< L2CAP_CID_DYN_START
|| scid
> L2CAP_CID_LE_DYN_END
) {
5593 result
= L2CAP_CR_LE_INVALID_SCID
;
5595 goto response_unlock
;
5598 /* Check if we already have channel with that dcid */
5599 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5600 result
= L2CAP_CR_LE_SCID_IN_USE
;
5602 goto response_unlock
;
5605 chan
= pchan
->ops
->new_connection(pchan
);
5607 result
= L2CAP_CR_LE_NO_MEM
;
5608 goto response_unlock
;
5611 bacpy(&chan
->src
, &conn
->hcon
->src
);
5612 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5613 chan
->src_type
= bdaddr_src_type(conn
->hcon
);
5614 chan
->dst_type
= bdaddr_dst_type(conn
->hcon
);
5618 chan
->remote_mps
= mps
;
5620 __l2cap_chan_add(conn
, chan
);
5622 l2cap_le_flowctl_init(chan
, __le16_to_cpu(req
->credits
));
5625 credits
= chan
->rx_credits
;
5627 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5629 chan
->ident
= cmd
->ident
;
5631 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5632 l2cap_state_change(chan
, BT_CONNECT2
);
5633 /* The following result value is actually not defined
5634 * for LE CoC but we use it to let the function know
5635 * that it should bail out after doing its cleanup
5636 * instead of sending a response.
5638 result
= L2CAP_CR_PEND
;
5639 chan
->ops
->defer(chan
);
5641 l2cap_chan_ready(chan
);
5642 result
= L2CAP_CR_LE_SUCCESS
;
5646 l2cap_chan_unlock(pchan
);
5647 mutex_unlock(&conn
->chan_lock
);
5648 l2cap_chan_put(pchan
);
5650 if (result
== L2CAP_CR_PEND
)
5655 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5656 rsp
.mps
= cpu_to_le16(chan
->mps
);
5662 rsp
.dcid
= cpu_to_le16(dcid
);
5663 rsp
.credits
= cpu_to_le16(credits
);
5664 rsp
.result
= cpu_to_le16(result
);
5666 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5671 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5672 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5675 struct l2cap_le_credits
*pkt
;
5676 struct l2cap_chan
*chan
;
5677 u16 cid
, credits
, max_credits
;
5679 if (cmd_len
!= sizeof(*pkt
))
5682 pkt
= (struct l2cap_le_credits
*) data
;
5683 cid
= __le16_to_cpu(pkt
->cid
);
5684 credits
= __le16_to_cpu(pkt
->credits
);
5686 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5688 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
5692 max_credits
= LE_FLOWCTL_MAX_CREDITS
- chan
->tx_credits
;
5693 if (credits
> max_credits
) {
5694 BT_ERR("LE credits overflow");
5695 l2cap_send_disconn_req(chan
, ECONNRESET
);
5696 l2cap_chan_unlock(chan
);
5698 /* Return 0 so that we don't trigger an unnecessary
5699 * command reject packet.
5704 chan
->tx_credits
+= credits
;
5706 /* Resume sending */
5707 l2cap_le_flowctl_send(chan
);
5709 if (chan
->tx_credits
)
5710 chan
->ops
->resume(chan
);
5712 l2cap_chan_unlock(chan
);
5717 static inline int l2cap_le_command_rej(struct l2cap_conn
*conn
,
5718 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5721 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
5722 struct l2cap_chan
*chan
;
5724 if (cmd_len
< sizeof(*rej
))
5727 mutex_lock(&conn
->chan_lock
);
5729 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5733 l2cap_chan_lock(chan
);
5734 l2cap_chan_del(chan
, ECONNREFUSED
);
5735 l2cap_chan_unlock(chan
);
5738 mutex_unlock(&conn
->chan_lock
);
5742 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5743 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5748 switch (cmd
->code
) {
5749 case L2CAP_COMMAND_REJ
:
5750 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5753 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5754 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5757 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5760 case L2CAP_LE_CONN_RSP
:
5761 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5764 case L2CAP_LE_CONN_REQ
:
5765 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5768 case L2CAP_LE_CREDITS
:
5769 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5772 case L2CAP_DISCONN_REQ
:
5773 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5776 case L2CAP_DISCONN_RSP
:
5777 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5781 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5789 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5790 struct sk_buff
*skb
)
5792 struct hci_conn
*hcon
= conn
->hcon
;
5793 struct l2cap_cmd_hdr
*cmd
;
5797 if (hcon
->type
!= LE_LINK
)
5800 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5803 cmd
= (void *) skb
->data
;
5804 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5806 len
= le16_to_cpu(cmd
->len
);
5808 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5810 if (len
!= skb
->len
|| !cmd
->ident
) {
5811 BT_DBG("corrupted command");
5815 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5817 struct l2cap_cmd_rej_unk rej
;
5819 BT_ERR("Wrong link type (%d)", err
);
5821 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5822 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5830 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5831 struct sk_buff
*skb
)
5833 struct hci_conn
*hcon
= conn
->hcon
;
5834 u8
*data
= skb
->data
;
5836 struct l2cap_cmd_hdr cmd
;
5839 l2cap_raw_recv(conn
, skb
);
5841 if (hcon
->type
!= ACL_LINK
)
5844 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5846 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5847 data
+= L2CAP_CMD_HDR_SIZE
;
5848 len
-= L2CAP_CMD_HDR_SIZE
;
5850 cmd_len
= le16_to_cpu(cmd
.len
);
5852 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5855 if (cmd_len
> len
|| !cmd
.ident
) {
5856 BT_DBG("corrupted command");
5860 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5862 struct l2cap_cmd_rej_unk rej
;
5864 BT_ERR("Wrong link type (%d)", err
);
5866 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5867 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5879 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5881 u16 our_fcs
, rcv_fcs
;
5884 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5885 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5887 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5889 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5890 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5891 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5892 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5894 if (our_fcs
!= rcv_fcs
)
5900 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5902 struct l2cap_ctrl control
;
5904 BT_DBG("chan %p", chan
);
5906 memset(&control
, 0, sizeof(control
));
5909 control
.reqseq
= chan
->buffer_seq
;
5910 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5912 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5913 control
.super
= L2CAP_SUPER_RNR
;
5914 l2cap_send_sframe(chan
, &control
);
5917 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5918 chan
->unacked_frames
> 0)
5919 __set_retrans_timer(chan
);
5921 /* Send pending iframes */
5922 l2cap_ertm_send(chan
);
5924 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5925 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5926 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5929 control
.super
= L2CAP_SUPER_RR
;
5930 l2cap_send_sframe(chan
, &control
);
5934 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5935 struct sk_buff
**last_frag
)
5937 /* skb->len reflects data in skb as well as all fragments
5938 * skb->data_len reflects only data in fragments
5940 if (!skb_has_frag_list(skb
))
5941 skb_shinfo(skb
)->frag_list
= new_frag
;
5943 new_frag
->next
= NULL
;
5945 (*last_frag
)->next
= new_frag
;
5946 *last_frag
= new_frag
;
5948 skb
->len
+= new_frag
->len
;
5949 skb
->data_len
+= new_frag
->len
;
5950 skb
->truesize
+= new_frag
->truesize
;
5953 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5954 struct l2cap_ctrl
*control
)
5958 switch (control
->sar
) {
5959 case L2CAP_SAR_UNSEGMENTED
:
5963 err
= chan
->ops
->recv(chan
, skb
);
5966 case L2CAP_SAR_START
:
5970 if (!pskb_may_pull(skb
, L2CAP_SDULEN_SIZE
))
5973 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5974 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5976 if (chan
->sdu_len
> chan
->imtu
) {
5981 if (skb
->len
>= chan
->sdu_len
)
5985 chan
->sdu_last_frag
= skb
;
5991 case L2CAP_SAR_CONTINUE
:
5995 append_skb_frag(chan
->sdu
, skb
,
5996 &chan
->sdu_last_frag
);
5999 if (chan
->sdu
->len
>= chan
->sdu_len
)
6009 append_skb_frag(chan
->sdu
, skb
,
6010 &chan
->sdu_last_frag
);
6013 if (chan
->sdu
->len
!= chan
->sdu_len
)
6016 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6019 /* Reassembly complete */
6021 chan
->sdu_last_frag
= NULL
;
6029 kfree_skb(chan
->sdu
);
6031 chan
->sdu_last_frag
= NULL
;
6038 static int l2cap_resegment(struct l2cap_chan
*chan
)
6044 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
6048 if (chan
->mode
!= L2CAP_MODE_ERTM
)
6051 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
6052 l2cap_tx(chan
, NULL
, NULL
, event
);
6055 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
6058 /* Pass sequential frames to l2cap_reassemble_sdu()
6059 * until a gap is encountered.
6062 BT_DBG("chan %p", chan
);
6064 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6065 struct sk_buff
*skb
;
6066 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6067 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
6069 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
6074 skb_unlink(skb
, &chan
->srej_q
);
6075 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6076 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->l2cap
);
6081 if (skb_queue_empty(&chan
->srej_q
)) {
6082 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6083 l2cap_send_ack(chan
);
6089 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
6090 struct l2cap_ctrl
*control
)
6092 struct sk_buff
*skb
;
6094 BT_DBG("chan %p, control %p", chan
, control
);
6096 if (control
->reqseq
== chan
->next_tx_seq
) {
6097 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
6098 l2cap_send_disconn_req(chan
, ECONNRESET
);
6102 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
6105 BT_DBG("Seq %d not available for retransmission",
6110 if (chan
->max_tx
!= 0 && bt_cb(skb
)->l2cap
.retries
>= chan
->max_tx
) {
6111 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
6112 l2cap_send_disconn_req(chan
, ECONNRESET
);
6116 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6118 if (control
->poll
) {
6119 l2cap_pass_to_tx(chan
, control
);
6121 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6122 l2cap_retransmit(chan
, control
);
6123 l2cap_ertm_send(chan
);
6125 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
6126 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6127 chan
->srej_save_reqseq
= control
->reqseq
;
6130 l2cap_pass_to_tx_fbit(chan
, control
);
6132 if (control
->final
) {
6133 if (chan
->srej_save_reqseq
!= control
->reqseq
||
6134 !test_and_clear_bit(CONN_SREJ_ACT
,
6136 l2cap_retransmit(chan
, control
);
6138 l2cap_retransmit(chan
, control
);
6139 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
6140 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6141 chan
->srej_save_reqseq
= control
->reqseq
;
6147 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
6148 struct l2cap_ctrl
*control
)
6150 struct sk_buff
*skb
;
6152 BT_DBG("chan %p, control %p", chan
, control
);
6154 if (control
->reqseq
== chan
->next_tx_seq
) {
6155 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
6156 l2cap_send_disconn_req(chan
, ECONNRESET
);
6160 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
6162 if (chan
->max_tx
&& skb
&&
6163 bt_cb(skb
)->l2cap
.retries
>= chan
->max_tx
) {
6164 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
6165 l2cap_send_disconn_req(chan
, ECONNRESET
);
6169 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6171 l2cap_pass_to_tx(chan
, control
);
6173 if (control
->final
) {
6174 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
6175 l2cap_retransmit_all(chan
, control
);
6177 l2cap_retransmit_all(chan
, control
);
6178 l2cap_ertm_send(chan
);
6179 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
6180 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
6184 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
6186 BT_DBG("chan %p, txseq %d", chan
, txseq
);
6188 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
6189 chan
->expected_tx_seq
);
6191 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
6192 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6194 /* See notes below regarding "double poll" and
6197 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6198 BT_DBG("Invalid/Ignore - after SREJ");
6199 return L2CAP_TXSEQ_INVALID_IGNORE
;
6201 BT_DBG("Invalid - in window after SREJ sent");
6202 return L2CAP_TXSEQ_INVALID
;
6206 if (chan
->srej_list
.head
== txseq
) {
6207 BT_DBG("Expected SREJ");
6208 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6211 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6212 BT_DBG("Duplicate SREJ - txseq already stored");
6213 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6216 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6217 BT_DBG("Unexpected SREJ - not requested");
6218 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6222 if (chan
->expected_tx_seq
== txseq
) {
6223 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6225 BT_DBG("Invalid - txseq outside tx window");
6226 return L2CAP_TXSEQ_INVALID
;
6229 return L2CAP_TXSEQ_EXPECTED
;
6233 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6234 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6235 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6236 return L2CAP_TXSEQ_DUPLICATE
;
6239 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6240 /* A source of invalid packets is a "double poll" condition,
6241 * where delays cause us to send multiple poll packets. If
6242 * the remote stack receives and processes both polls,
6243 * sequence numbers can wrap around in such a way that a
6244 * resent frame has a sequence number that looks like new data
6245 * with a sequence gap. This would trigger an erroneous SREJ
6248 * Fortunately, this is impossible with a tx window that's
6249 * less than half of the maximum sequence number, which allows
6250 * invalid frames to be safely ignored.
6252 * With tx window sizes greater than half of the tx window
6253 * maximum, the frame is invalid and cannot be ignored. This
6254 * causes a disconnect.
6257 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6258 BT_DBG("Invalid/Ignore - txseq outside tx window");
6259 return L2CAP_TXSEQ_INVALID_IGNORE
;
6261 BT_DBG("Invalid - txseq outside tx window");
6262 return L2CAP_TXSEQ_INVALID
;
6265 BT_DBG("Unexpected - txseq indicates missing frames");
6266 return L2CAP_TXSEQ_UNEXPECTED
;
6270 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6271 struct l2cap_ctrl
*control
,
6272 struct sk_buff
*skb
, u8 event
)
6275 bool skb_in_use
= false;
6277 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6281 case L2CAP_EV_RECV_IFRAME
:
6282 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6283 case L2CAP_TXSEQ_EXPECTED
:
6284 l2cap_pass_to_tx(chan
, control
);
6286 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6287 BT_DBG("Busy, discarding expected seq %d",
6292 chan
->expected_tx_seq
= __next_seq(chan
,
6295 chan
->buffer_seq
= chan
->expected_tx_seq
;
6298 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6302 if (control
->final
) {
6303 if (!test_and_clear_bit(CONN_REJ_ACT
,
6304 &chan
->conn_state
)) {
6306 l2cap_retransmit_all(chan
, control
);
6307 l2cap_ertm_send(chan
);
6311 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6312 l2cap_send_ack(chan
);
6314 case L2CAP_TXSEQ_UNEXPECTED
:
6315 l2cap_pass_to_tx(chan
, control
);
6317 /* Can't issue SREJ frames in the local busy state.
6318 * Drop this frame, it will be seen as missing
6319 * when local busy is exited.
6321 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6322 BT_DBG("Busy, discarding unexpected seq %d",
6327 /* There was a gap in the sequence, so an SREJ
6328 * must be sent for each missing frame. The
6329 * current frame is stored for later use.
6331 skb_queue_tail(&chan
->srej_q
, skb
);
6333 BT_DBG("Queued %p (queue len %d)", skb
,
6334 skb_queue_len(&chan
->srej_q
));
6336 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6337 l2cap_seq_list_clear(&chan
->srej_list
);
6338 l2cap_send_srej(chan
, control
->txseq
);
6340 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6342 case L2CAP_TXSEQ_DUPLICATE
:
6343 l2cap_pass_to_tx(chan
, control
);
6345 case L2CAP_TXSEQ_INVALID_IGNORE
:
6347 case L2CAP_TXSEQ_INVALID
:
6349 l2cap_send_disconn_req(chan
, ECONNRESET
);
6353 case L2CAP_EV_RECV_RR
:
6354 l2cap_pass_to_tx(chan
, control
);
6355 if (control
->final
) {
6356 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6358 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6359 !__chan_is_moving(chan
)) {
6361 l2cap_retransmit_all(chan
, control
);
6364 l2cap_ertm_send(chan
);
6365 } else if (control
->poll
) {
6366 l2cap_send_i_or_rr_or_rnr(chan
);
6368 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6369 &chan
->conn_state
) &&
6370 chan
->unacked_frames
)
6371 __set_retrans_timer(chan
);
6373 l2cap_ertm_send(chan
);
6376 case L2CAP_EV_RECV_RNR
:
6377 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6378 l2cap_pass_to_tx(chan
, control
);
6379 if (control
&& control
->poll
) {
6380 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6381 l2cap_send_rr_or_rnr(chan
, 0);
6383 __clear_retrans_timer(chan
);
6384 l2cap_seq_list_clear(&chan
->retrans_list
);
6386 case L2CAP_EV_RECV_REJ
:
6387 l2cap_handle_rej(chan
, control
);
6389 case L2CAP_EV_RECV_SREJ
:
6390 l2cap_handle_srej(chan
, control
);
6396 if (skb
&& !skb_in_use
) {
6397 BT_DBG("Freeing %p", skb
);
6404 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6405 struct l2cap_ctrl
*control
,
6406 struct sk_buff
*skb
, u8 event
)
6409 u16 txseq
= control
->txseq
;
6410 bool skb_in_use
= false;
6412 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6416 case L2CAP_EV_RECV_IFRAME
:
6417 switch (l2cap_classify_txseq(chan
, txseq
)) {
6418 case L2CAP_TXSEQ_EXPECTED
:
6419 /* Keep frame for reassembly later */
6420 l2cap_pass_to_tx(chan
, control
);
6421 skb_queue_tail(&chan
->srej_q
, skb
);
6423 BT_DBG("Queued %p (queue len %d)", skb
,
6424 skb_queue_len(&chan
->srej_q
));
6426 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6428 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6429 l2cap_seq_list_pop(&chan
->srej_list
);
6431 l2cap_pass_to_tx(chan
, control
);
6432 skb_queue_tail(&chan
->srej_q
, skb
);
6434 BT_DBG("Queued %p (queue len %d)", skb
,
6435 skb_queue_len(&chan
->srej_q
));
6437 err
= l2cap_rx_queued_iframes(chan
);
6442 case L2CAP_TXSEQ_UNEXPECTED
:
6443 /* Got a frame that can't be reassembled yet.
6444 * Save it for later, and send SREJs to cover
6445 * the missing frames.
6447 skb_queue_tail(&chan
->srej_q
, skb
);
6449 BT_DBG("Queued %p (queue len %d)", skb
,
6450 skb_queue_len(&chan
->srej_q
));
6452 l2cap_pass_to_tx(chan
, control
);
6453 l2cap_send_srej(chan
, control
->txseq
);
6455 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6456 /* This frame was requested with an SREJ, but
6457 * some expected retransmitted frames are
6458 * missing. Request retransmission of missing
6461 skb_queue_tail(&chan
->srej_q
, skb
);
6463 BT_DBG("Queued %p (queue len %d)", skb
,
6464 skb_queue_len(&chan
->srej_q
));
6466 l2cap_pass_to_tx(chan
, control
);
6467 l2cap_send_srej_list(chan
, control
->txseq
);
6469 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6470 /* We've already queued this frame. Drop this copy. */
6471 l2cap_pass_to_tx(chan
, control
);
6473 case L2CAP_TXSEQ_DUPLICATE
:
6474 /* Expecting a later sequence number, so this frame
6475 * was already received. Ignore it completely.
6478 case L2CAP_TXSEQ_INVALID_IGNORE
:
6480 case L2CAP_TXSEQ_INVALID
:
6482 l2cap_send_disconn_req(chan
, ECONNRESET
);
6486 case L2CAP_EV_RECV_RR
:
6487 l2cap_pass_to_tx(chan
, control
);
6488 if (control
->final
) {
6489 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6491 if (!test_and_clear_bit(CONN_REJ_ACT
,
6492 &chan
->conn_state
)) {
6494 l2cap_retransmit_all(chan
, control
);
6497 l2cap_ertm_send(chan
);
6498 } else if (control
->poll
) {
6499 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6500 &chan
->conn_state
) &&
6501 chan
->unacked_frames
) {
6502 __set_retrans_timer(chan
);
6505 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6506 l2cap_send_srej_tail(chan
);
6508 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6509 &chan
->conn_state
) &&
6510 chan
->unacked_frames
)
6511 __set_retrans_timer(chan
);
6513 l2cap_send_ack(chan
);
6516 case L2CAP_EV_RECV_RNR
:
6517 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6518 l2cap_pass_to_tx(chan
, control
);
6519 if (control
->poll
) {
6520 l2cap_send_srej_tail(chan
);
6522 struct l2cap_ctrl rr_control
;
6523 memset(&rr_control
, 0, sizeof(rr_control
));
6524 rr_control
.sframe
= 1;
6525 rr_control
.super
= L2CAP_SUPER_RR
;
6526 rr_control
.reqseq
= chan
->buffer_seq
;
6527 l2cap_send_sframe(chan
, &rr_control
);
6531 case L2CAP_EV_RECV_REJ
:
6532 l2cap_handle_rej(chan
, control
);
6534 case L2CAP_EV_RECV_SREJ
:
6535 l2cap_handle_srej(chan
, control
);
6539 if (skb
&& !skb_in_use
) {
6540 BT_DBG("Freeing %p", skb
);
6547 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6549 BT_DBG("chan %p", chan
);
6551 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6554 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6556 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6558 return l2cap_resegment(chan
);
6561 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6562 struct l2cap_ctrl
*control
,
6563 struct sk_buff
*skb
, u8 event
)
6567 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6573 l2cap_process_reqseq(chan
, control
->reqseq
);
6575 if (!skb_queue_empty(&chan
->tx_q
))
6576 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6578 chan
->tx_send_head
= NULL
;
6580 /* Rewind next_tx_seq to the point expected
6583 chan
->next_tx_seq
= control
->reqseq
;
6584 chan
->unacked_frames
= 0;
6586 err
= l2cap_finish_move(chan
);
6590 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6591 l2cap_send_i_or_rr_or_rnr(chan
);
6593 if (event
== L2CAP_EV_RECV_IFRAME
)
6596 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6599 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6600 struct l2cap_ctrl
*control
,
6601 struct sk_buff
*skb
, u8 event
)
6605 if (!control
->final
)
6608 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6610 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6611 l2cap_process_reqseq(chan
, control
->reqseq
);
6613 if (!skb_queue_empty(&chan
->tx_q
))
6614 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6616 chan
->tx_send_head
= NULL
;
6618 /* Rewind next_tx_seq to the point expected
6621 chan
->next_tx_seq
= control
->reqseq
;
6622 chan
->unacked_frames
= 0;
6625 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6627 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6629 err
= l2cap_resegment(chan
);
6632 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6637 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6639 /* Make sure reqseq is for a packet that has been sent but not acked */
6642 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6643 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6646 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6647 struct sk_buff
*skb
, u8 event
)
6651 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6652 control
, skb
, event
, chan
->rx_state
);
6654 if (__valid_reqseq(chan
, control
->reqseq
)) {
6655 switch (chan
->rx_state
) {
6656 case L2CAP_RX_STATE_RECV
:
6657 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6659 case L2CAP_RX_STATE_SREJ_SENT
:
6660 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6663 case L2CAP_RX_STATE_WAIT_P
:
6664 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6666 case L2CAP_RX_STATE_WAIT_F
:
6667 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6674 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6675 control
->reqseq
, chan
->next_tx_seq
,
6676 chan
->expected_ack_seq
);
6677 l2cap_send_disconn_req(chan
, ECONNRESET
);
6683 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6684 struct sk_buff
*skb
)
6686 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6689 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6690 L2CAP_TXSEQ_EXPECTED
) {
6691 l2cap_pass_to_tx(chan
, control
);
6693 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6694 __next_seq(chan
, chan
->buffer_seq
));
6696 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6698 l2cap_reassemble_sdu(chan
, skb
, control
);
6701 kfree_skb(chan
->sdu
);
6704 chan
->sdu_last_frag
= NULL
;
6708 BT_DBG("Freeing %p", skb
);
6713 chan
->last_acked_seq
= control
->txseq
;
6714 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6719 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6721 struct l2cap_ctrl
*control
= &bt_cb(skb
)->l2cap
;
6725 __unpack_control(chan
, skb
);
6730 * We can just drop the corrupted I-frame here.
6731 * Receiver will miss it and start proper recovery
6732 * procedures and ask for retransmission.
6734 if (l2cap_check_fcs(chan
, skb
))
6737 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6738 len
-= L2CAP_SDULEN_SIZE
;
6740 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6741 len
-= L2CAP_FCS_SIZE
;
6743 if (len
> chan
->mps
) {
6744 l2cap_send_disconn_req(chan
, ECONNRESET
);
6748 if ((chan
->mode
== L2CAP_MODE_ERTM
||
6749 chan
->mode
== L2CAP_MODE_STREAMING
) && sk_filter(chan
->data
, skb
))
6752 if (!control
->sframe
) {
6755 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6756 control
->sar
, control
->reqseq
, control
->final
,
6759 /* Validate F-bit - F=0 always valid, F=1 only
6760 * valid in TX WAIT_F
6762 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6765 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6766 event
= L2CAP_EV_RECV_IFRAME
;
6767 err
= l2cap_rx(chan
, control
, skb
, event
);
6769 err
= l2cap_stream_rx(chan
, control
, skb
);
6773 l2cap_send_disconn_req(chan
, ECONNRESET
);
6775 const u8 rx_func_to_event
[4] = {
6776 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6777 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6780 /* Only I-frames are expected in streaming mode */
6781 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6784 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6785 control
->reqseq
, control
->final
, control
->poll
,
6789 BT_ERR("Trailing bytes: %d in sframe", len
);
6790 l2cap_send_disconn_req(chan
, ECONNRESET
);
6794 /* Validate F and P bits */
6795 if (control
->final
&& (control
->poll
||
6796 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6799 event
= rx_func_to_event
[control
->super
];
6800 if (l2cap_rx(chan
, control
, skb
, event
))
6801 l2cap_send_disconn_req(chan
, ECONNRESET
);
6811 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6813 struct l2cap_conn
*conn
= chan
->conn
;
6814 struct l2cap_le_credits pkt
;
6817 return_credits
= ((chan
->imtu
/ chan
->mps
) + 1) - chan
->rx_credits
;
6819 if (!return_credits
)
6822 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6824 chan
->rx_credits
+= return_credits
;
6826 pkt
.cid
= cpu_to_le16(chan
->scid
);
6827 pkt
.credits
= cpu_to_le16(return_credits
);
6829 chan
->ident
= l2cap_get_ident(conn
);
6831 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
6834 static int l2cap_le_recv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6838 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan
, skb
->len
);
6840 /* Wait recv to confirm reception before updating the credits */
6841 err
= chan
->ops
->recv(chan
, skb
);
6843 /* Update credits whenever an SDU is received */
6844 l2cap_chan_le_send_credits(chan
);
6849 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6853 if (!chan
->rx_credits
) {
6854 BT_ERR("No credits to receive LE L2CAP data");
6855 l2cap_send_disconn_req(chan
, ECONNRESET
);
6859 if (chan
->imtu
< skb
->len
) {
6860 BT_ERR("Too big LE L2CAP PDU");
6865 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6867 /* Update if remote had run out of credits, this should only happens
6868 * if the remote is not using the entire MPS.
6870 if (!chan
->rx_credits
)
6871 l2cap_chan_le_send_credits(chan
);
6878 sdu_len
= get_unaligned_le16(skb
->data
);
6879 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6881 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6882 sdu_len
, skb
->len
, chan
->imtu
);
6884 if (sdu_len
> chan
->imtu
) {
6885 BT_ERR("Too big LE L2CAP SDU length received");
6890 if (skb
->len
> sdu_len
) {
6891 BT_ERR("Too much LE L2CAP data received");
6896 if (skb
->len
== sdu_len
)
6897 return l2cap_le_recv(chan
, skb
);
6900 chan
->sdu_len
= sdu_len
;
6901 chan
->sdu_last_frag
= skb
;
6903 /* Detect if remote is not able to use the selected MPS */
6904 if (skb
->len
+ L2CAP_SDULEN_SIZE
< chan
->mps
) {
6905 u16 mps_len
= skb
->len
+ L2CAP_SDULEN_SIZE
;
6907 /* Adjust the number of credits */
6908 BT_DBG("chan->mps %u -> %u", chan
->mps
, mps_len
);
6909 chan
->mps
= mps_len
;
6910 l2cap_chan_le_send_credits(chan
);
6916 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6917 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6919 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6920 BT_ERR("Too much LE L2CAP data received");
6925 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6928 if (chan
->sdu
->len
== chan
->sdu_len
) {
6929 err
= l2cap_le_recv(chan
, chan
->sdu
);
6932 chan
->sdu_last_frag
= NULL
;
6940 kfree_skb(chan
->sdu
);
6942 chan
->sdu_last_frag
= NULL
;
6946 /* We can't return an error here since we took care of the skb
6947 * freeing internally. An error return would cause the caller to
6948 * do a double-free of the skb.
6953 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6954 struct sk_buff
*skb
)
6956 struct l2cap_chan
*chan
;
6958 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6960 if (cid
== L2CAP_CID_A2MP
) {
6961 chan
= a2mp_channel_create(conn
, skb
);
6967 l2cap_chan_lock(chan
);
6969 BT_DBG("unknown cid 0x%4.4x", cid
);
6970 /* Drop packet and return */
6976 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6978 /* If we receive data on a fixed channel before the info req/rsp
6979 * procdure is done simply assume that the channel is supported
6980 * and mark it as ready.
6982 if (chan
->chan_type
== L2CAP_CHAN_FIXED
)
6983 l2cap_chan_ready(chan
);
6985 if (chan
->state
!= BT_CONNECTED
)
6988 switch (chan
->mode
) {
6989 case L2CAP_MODE_LE_FLOWCTL
:
6990 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6995 case L2CAP_MODE_BASIC
:
6996 /* If socket recv buffers overflows we drop data here
6997 * which is *bad* because L2CAP has to be reliable.
6998 * But we don't have any other choice. L2CAP doesn't
6999 * provide flow control mechanism. */
7001 if (chan
->imtu
< skb
->len
) {
7002 BT_ERR("Dropping L2CAP data: receive buffer overflow");
7006 if (!chan
->ops
->recv(chan
, skb
))
7010 case L2CAP_MODE_ERTM
:
7011 case L2CAP_MODE_STREAMING
:
7012 l2cap_data_rcv(chan
, skb
);
7016 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
7024 l2cap_chan_unlock(chan
);
7027 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
7028 struct sk_buff
*skb
)
7030 struct hci_conn
*hcon
= conn
->hcon
;
7031 struct l2cap_chan
*chan
;
7033 if (hcon
->type
!= ACL_LINK
)
7036 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
7041 BT_DBG("chan %p, len %d", chan
, skb
->len
);
7043 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
7046 if (chan
->imtu
< skb
->len
)
7049 /* Store remote BD_ADDR and PSM for msg_name */
7050 bacpy(&bt_cb(skb
)->l2cap
.bdaddr
, &hcon
->dst
);
7051 bt_cb(skb
)->l2cap
.psm
= psm
;
7053 if (!chan
->ops
->recv(chan
, skb
)) {
7054 l2cap_chan_put(chan
);
7059 l2cap_chan_put(chan
);
7064 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
7066 struct l2cap_hdr
*lh
= (void *) skb
->data
;
7067 struct hci_conn
*hcon
= conn
->hcon
;
7071 if (hcon
->state
!= BT_CONNECTED
) {
7072 BT_DBG("queueing pending rx skb");
7073 skb_queue_tail(&conn
->pending_rx
, skb
);
7077 skb_pull(skb
, L2CAP_HDR_SIZE
);
7078 cid
= __le16_to_cpu(lh
->cid
);
7079 len
= __le16_to_cpu(lh
->len
);
7081 if (len
!= skb
->len
) {
7086 /* Since we can't actively block incoming LE connections we must
7087 * at least ensure that we ignore incoming data from them.
7089 if (hcon
->type
== LE_LINK
&&
7090 hci_bdaddr_list_lookup(&hcon
->hdev
->blacklist
, &hcon
->dst
,
7091 bdaddr_dst_type(hcon
))) {
7096 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
7099 case L2CAP_CID_SIGNALING
:
7100 l2cap_sig_channel(conn
, skb
);
7103 case L2CAP_CID_CONN_LESS
:
7104 psm
= get_unaligned((__le16
*) skb
->data
);
7105 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
7106 l2cap_conless_channel(conn
, psm
, skb
);
7109 case L2CAP_CID_LE_SIGNALING
:
7110 l2cap_le_sig_channel(conn
, skb
);
7114 l2cap_data_channel(conn
, cid
, skb
);
7119 static void process_pending_rx(struct work_struct
*work
)
7121 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
7123 struct sk_buff
*skb
;
7127 while ((skb
= skb_dequeue(&conn
->pending_rx
)))
7128 l2cap_recv_frame(conn
, skb
);
7131 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
7133 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7134 struct hci_chan
*hchan
;
7139 hchan
= hci_chan_create(hcon
);
7143 conn
= kzalloc(sizeof(*conn
), GFP_KERNEL
);
7145 hci_chan_del(hchan
);
7149 kref_init(&conn
->ref
);
7150 hcon
->l2cap_data
= conn
;
7151 conn
->hcon
= hci_conn_get(hcon
);
7152 conn
->hchan
= hchan
;
7154 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
7156 switch (hcon
->type
) {
7158 if (hcon
->hdev
->le_mtu
) {
7159 conn
->mtu
= hcon
->hdev
->le_mtu
;
7164 conn
->mtu
= hcon
->hdev
->acl_mtu
;
7168 conn
->feat_mask
= 0;
7170 conn
->local_fixed_chan
= L2CAP_FC_SIG_BREDR
| L2CAP_FC_CONNLESS
;
7172 if (hcon
->type
== ACL_LINK
&&
7173 hci_dev_test_flag(hcon
->hdev
, HCI_HS_ENABLED
))
7174 conn
->local_fixed_chan
|= L2CAP_FC_A2MP
;
7176 if (hci_dev_test_flag(hcon
->hdev
, HCI_LE_ENABLED
) &&
7177 (bredr_sc_enabled(hcon
->hdev
) ||
7178 hci_dev_test_flag(hcon
->hdev
, HCI_FORCE_BREDR_SMP
)))
7179 conn
->local_fixed_chan
|= L2CAP_FC_SMP_BREDR
;
7181 mutex_init(&conn
->ident_lock
);
7182 mutex_init(&conn
->chan_lock
);
7184 INIT_LIST_HEAD(&conn
->chan_l
);
7185 INIT_LIST_HEAD(&conn
->users
);
7187 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
7189 skb_queue_head_init(&conn
->pending_rx
);
7190 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
7191 INIT_WORK(&conn
->id_addr_update_work
, l2cap_conn_update_id_addr
);
7193 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
7198 static bool is_valid_psm(u16 psm
, u8 dst_type
) {
7202 if (bdaddr_type_is_le(dst_type
))
7203 return (psm
<= 0x00ff);
7205 /* PSM must be odd and lsb of upper byte must be 0 */
7206 return ((psm
& 0x0101) == 0x0001);
7209 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
7210 bdaddr_t
*dst
, u8 dst_type
)
7212 struct l2cap_conn
*conn
;
7213 struct hci_conn
*hcon
;
7214 struct hci_dev
*hdev
;
7217 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
7218 dst_type
, __le16_to_cpu(psm
));
7220 hdev
= hci_get_route(dst
, &chan
->src
, chan
->src_type
);
7222 return -EHOSTUNREACH
;
7226 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
7227 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
7232 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
7237 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
7242 switch (chan
->mode
) {
7243 case L2CAP_MODE_BASIC
:
7245 case L2CAP_MODE_LE_FLOWCTL
:
7247 case L2CAP_MODE_ERTM
:
7248 case L2CAP_MODE_STREAMING
:
7257 switch (chan
->state
) {
7261 /* Already connecting */
7266 /* Already connected */
7280 /* Set destination address and psm */
7281 bacpy(&chan
->dst
, dst
);
7282 chan
->dst_type
= dst_type
;
7287 if (bdaddr_type_is_le(dst_type
)) {
7288 /* Convert from L2CAP channel address type to HCI address type
7290 if (dst_type
== BDADDR_LE_PUBLIC
)
7291 dst_type
= ADDR_LE_DEV_PUBLIC
;
7293 dst_type
= ADDR_LE_DEV_RANDOM
;
7295 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
7296 hcon
= hci_connect_le(hdev
, dst
, dst_type
,
7298 HCI_LE_CONN_TIMEOUT
,
7299 HCI_ROLE_SLAVE
, NULL
);
7301 hcon
= hci_connect_le_scan(hdev
, dst
, dst_type
,
7303 HCI_LE_CONN_TIMEOUT
);
7306 u8 auth_type
= l2cap_get_auth_type(chan
);
7307 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7311 err
= PTR_ERR(hcon
);
7315 conn
= l2cap_conn_add(hcon
);
7317 hci_conn_drop(hcon
);
7322 mutex_lock(&conn
->chan_lock
);
7323 l2cap_chan_lock(chan
);
7325 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7326 hci_conn_drop(hcon
);
7331 /* Update source addr of the socket */
7332 bacpy(&chan
->src
, &hcon
->src
);
7333 chan
->src_type
= bdaddr_src_type(hcon
);
7335 __l2cap_chan_add(conn
, chan
);
7337 /* l2cap_chan_add takes its own ref so we can drop this one */
7338 hci_conn_drop(hcon
);
7340 l2cap_state_change(chan
, BT_CONNECT
);
7341 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7343 /* Release chan->sport so that it can be reused by other
7344 * sockets (as it's only used for listening sockets).
7346 write_lock(&chan_list_lock
);
7348 write_unlock(&chan_list_lock
);
7350 if (hcon
->state
== BT_CONNECTED
) {
7351 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7352 __clear_chan_timer(chan
);
7353 if (l2cap_chan_check_security(chan
, true))
7354 l2cap_state_change(chan
, BT_CONNECTED
);
7356 l2cap_do_start(chan
);
7362 l2cap_chan_unlock(chan
);
7363 mutex_unlock(&conn
->chan_lock
);
7365 hci_dev_unlock(hdev
);
7369 EXPORT_SYMBOL_GPL(l2cap_chan_connect
);
7371 /* ---- L2CAP interface with lower layer (HCI) ---- */
7373 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7375 int exact
= 0, lm1
= 0, lm2
= 0;
7376 struct l2cap_chan
*c
;
7378 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7380 /* Find listening sockets and check their link_mode */
7381 read_lock(&chan_list_lock
);
7382 list_for_each_entry(c
, &chan_list
, global_l
) {
7383 if (c
->state
!= BT_LISTEN
)
7386 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7387 lm1
|= HCI_LM_ACCEPT
;
7388 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7389 lm1
|= HCI_LM_MASTER
;
7391 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7392 lm2
|= HCI_LM_ACCEPT
;
7393 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7394 lm2
|= HCI_LM_MASTER
;
7397 read_unlock(&chan_list_lock
);
7399 return exact
? lm1
: lm2
;
7402 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7403 * from an existing channel in the list or from the beginning of the
7404 * global list (by passing NULL as first parameter).
7406 static struct l2cap_chan
*l2cap_global_fixed_chan(struct l2cap_chan
*c
,
7407 struct hci_conn
*hcon
)
7409 u8 src_type
= bdaddr_src_type(hcon
);
7411 read_lock(&chan_list_lock
);
7414 c
= list_next_entry(c
, global_l
);
7416 c
= list_entry(chan_list
.next
, typeof(*c
), global_l
);
7418 list_for_each_entry_from(c
, &chan_list
, global_l
) {
7419 if (c
->chan_type
!= L2CAP_CHAN_FIXED
)
7421 if (c
->state
!= BT_LISTEN
)
7423 if (bacmp(&c
->src
, &hcon
->src
) && bacmp(&c
->src
, BDADDR_ANY
))
7425 if (src_type
!= c
->src_type
)
7429 read_unlock(&chan_list_lock
);
7433 read_unlock(&chan_list_lock
);
7438 static void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7440 struct hci_dev
*hdev
= hcon
->hdev
;
7441 struct l2cap_conn
*conn
;
7442 struct l2cap_chan
*pchan
;
7445 if (hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
)
7448 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7451 l2cap_conn_del(hcon
, bt_to_errno(status
));
7455 conn
= l2cap_conn_add(hcon
);
7459 dst_type
= bdaddr_dst_type(hcon
);
7461 /* If device is blocked, do not create channels for it */
7462 if (hci_bdaddr_list_lookup(&hdev
->blacklist
, &hcon
->dst
, dst_type
))
7465 /* Find fixed channels and notify them of the new connection. We
7466 * use multiple individual lookups, continuing each time where
7467 * we left off, because the list lock would prevent calling the
7468 * potentially sleeping l2cap_chan_lock() function.
7470 pchan
= l2cap_global_fixed_chan(NULL
, hcon
);
7472 struct l2cap_chan
*chan
, *next
;
7474 /* Client fixed channels should override server ones */
7475 if (__l2cap_get_chan_by_dcid(conn
, pchan
->scid
))
7478 l2cap_chan_lock(pchan
);
7479 chan
= pchan
->ops
->new_connection(pchan
);
7481 bacpy(&chan
->src
, &hcon
->src
);
7482 bacpy(&chan
->dst
, &hcon
->dst
);
7483 chan
->src_type
= bdaddr_src_type(hcon
);
7484 chan
->dst_type
= dst_type
;
7486 __l2cap_chan_add(conn
, chan
);
7489 l2cap_chan_unlock(pchan
);
7491 next
= l2cap_global_fixed_chan(pchan
, hcon
);
7492 l2cap_chan_put(pchan
);
7496 l2cap_conn_ready(conn
);
7499 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7501 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7503 BT_DBG("hcon %p", hcon
);
7506 return HCI_ERROR_REMOTE_USER_TERM
;
7507 return conn
->disc_reason
;
7510 static void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7512 if (hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
)
7515 BT_DBG("hcon %p reason %d", hcon
, reason
);
7517 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7520 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7522 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7525 if (encrypt
== 0x00) {
7526 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7527 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7528 } else if (chan
->sec_level
== BT_SECURITY_HIGH
||
7529 chan
->sec_level
== BT_SECURITY_FIPS
)
7530 l2cap_chan_close(chan
, ECONNREFUSED
);
7532 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7533 __clear_chan_timer(chan
);
7537 static void l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7539 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7540 struct l2cap_chan
*chan
;
7545 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7547 mutex_lock(&conn
->chan_lock
);
7549 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7550 l2cap_chan_lock(chan
);
7552 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7553 state_to_string(chan
->state
));
7555 if (chan
->scid
== L2CAP_CID_A2MP
) {
7556 l2cap_chan_unlock(chan
);
7560 if (!status
&& encrypt
)
7561 chan
->sec_level
= hcon
->sec_level
;
7563 if (!__l2cap_no_conn_pending(chan
)) {
7564 l2cap_chan_unlock(chan
);
7568 if (!status
&& (chan
->state
== BT_CONNECTED
||
7569 chan
->state
== BT_CONFIG
)) {
7570 chan
->ops
->resume(chan
);
7571 l2cap_check_encryption(chan
, encrypt
);
7572 l2cap_chan_unlock(chan
);
7576 if (chan
->state
== BT_CONNECT
) {
7577 if (!status
&& l2cap_check_enc_key_size(hcon
))
7578 l2cap_start_connection(chan
);
7580 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7581 } else if (chan
->state
== BT_CONNECT2
&&
7582 chan
->mode
!= L2CAP_MODE_LE_FLOWCTL
) {
7583 struct l2cap_conn_rsp rsp
;
7586 if (!status
&& l2cap_check_enc_key_size(hcon
)) {
7587 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7588 res
= L2CAP_CR_PEND
;
7589 stat
= L2CAP_CS_AUTHOR_PEND
;
7590 chan
->ops
->defer(chan
);
7592 l2cap_state_change(chan
, BT_CONFIG
);
7593 res
= L2CAP_CR_SUCCESS
;
7594 stat
= L2CAP_CS_NO_INFO
;
7597 l2cap_state_change(chan
, BT_DISCONN
);
7598 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7599 res
= L2CAP_CR_SEC_BLOCK
;
7600 stat
= L2CAP_CS_NO_INFO
;
7603 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7604 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7605 rsp
.result
= cpu_to_le16(res
);
7606 rsp
.status
= cpu_to_le16(stat
);
7607 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7610 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7611 res
== L2CAP_CR_SUCCESS
) {
7613 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7614 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7616 l2cap_build_conf_req(chan
, buf
, sizeof(buf
)),
7618 chan
->num_conf_req
++;
7622 l2cap_chan_unlock(chan
);
7625 mutex_unlock(&conn
->chan_lock
);
7628 void l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7630 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7631 struct l2cap_hdr
*hdr
;
7634 /* For AMP controller do not create l2cap conn */
7635 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_PRIMARY
)
7639 conn
= l2cap_conn_add(hcon
);
7644 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7648 case ACL_START_NO_FLUSH
:
7651 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7652 kfree_skb(conn
->rx_skb
);
7653 conn
->rx_skb
= NULL
;
7655 l2cap_conn_unreliable(conn
, ECOMM
);
7658 /* Start fragment always begin with Basic L2CAP header */
7659 if (skb
->len
< L2CAP_HDR_SIZE
) {
7660 BT_ERR("Frame is too short (len %d)", skb
->len
);
7661 l2cap_conn_unreliable(conn
, ECOMM
);
7665 hdr
= (struct l2cap_hdr
*) skb
->data
;
7666 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7668 if (len
== skb
->len
) {
7669 /* Complete frame received */
7670 l2cap_recv_frame(conn
, skb
);
7674 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7676 if (skb
->len
> len
) {
7677 BT_ERR("Frame is too long (len %d, expected len %d)",
7679 l2cap_conn_unreliable(conn
, ECOMM
);
7683 /* Allocate skb for the complete frame (with header) */
7684 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7688 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7690 conn
->rx_len
= len
- skb
->len
;
7694 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7696 if (!conn
->rx_len
) {
7697 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7698 l2cap_conn_unreliable(conn
, ECOMM
);
7702 if (skb
->len
> conn
->rx_len
) {
7703 BT_ERR("Fragment is too long (len %d, expected %d)",
7704 skb
->len
, conn
->rx_len
);
7705 kfree_skb(conn
->rx_skb
);
7706 conn
->rx_skb
= NULL
;
7708 l2cap_conn_unreliable(conn
, ECOMM
);
7712 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7714 conn
->rx_len
-= skb
->len
;
7716 if (!conn
->rx_len
) {
7717 /* Complete frame received. l2cap_recv_frame
7718 * takes ownership of the skb so set the global
7719 * rx_skb pointer to NULL first.
7721 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7722 conn
->rx_skb
= NULL
;
7723 l2cap_recv_frame(conn
, rx_skb
);
7732 static struct hci_cb l2cap_cb
= {
7734 .connect_cfm
= l2cap_connect_cfm
,
7735 .disconn_cfm
= l2cap_disconn_cfm
,
7736 .security_cfm
= l2cap_security_cfm
,
7739 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7741 struct l2cap_chan
*c
;
7743 read_lock(&chan_list_lock
);
7745 list_for_each_entry(c
, &chan_list
, global_l
) {
7746 seq_printf(f
, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7747 &c
->src
, c
->src_type
, &c
->dst
, c
->dst_type
,
7748 c
->state
, __le16_to_cpu(c
->psm
),
7749 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7750 c
->sec_level
, c
->mode
);
7753 read_unlock(&chan_list_lock
);
7758 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs
);
/* debugfs entry created by l2cap_init(), removed by l2cap_exit() */
static struct dentry *l2cap_debugfs;
7762 int __init
l2cap_init(void)
7766 err
= l2cap_init_sockets();
7770 hci_register_cb(&l2cap_cb
);
7772 if (IS_ERR_OR_NULL(bt_debugfs
))
7775 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7776 NULL
, &l2cap_debugfs_fops
);
7781 void l2cap_exit(void)
7783 debugfs_remove(l2cap_debugfs
);
7784 hci_unregister_cb(&l2cap_cb
);
7785 l2cap_cleanup_sockets();
7788 module_param(disable_ertm
, bool, 0644);
7789 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");