/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
51 static LIST_HEAD(chan_list
);
52 static DEFINE_RWLOCK(chan_list_lock
);
54 static u16 le_max_credits
= L2CAP_LE_MAX_CREDITS
;
55 static u16 le_default_mps
= L2CAP_LE_DEFAULT_MPS
;
57 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
58 u8 code
, u8 ident
, u16 dlen
, void *data
);
59 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
61 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
62 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
64 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
65 struct sk_buff_head
*skbs
, u8 event
);
67 static inline u8
bdaddr_type(u8 link_type
, u8 bdaddr_type
)
69 if (link_type
== LE_LINK
) {
70 if (bdaddr_type
== ADDR_LE_DEV_PUBLIC
)
71 return BDADDR_LE_PUBLIC
;
73 return BDADDR_LE_RANDOM
;
79 static inline u8
bdaddr_src_type(struct hci_conn
*hcon
)
81 return bdaddr_type(hcon
->type
, hcon
->src_type
);
84 static inline u8
bdaddr_dst_type(struct hci_conn
*hcon
)
86 return bdaddr_type(hcon
->type
, hcon
->dst_type
);
89 /* ---- L2CAP channels ---- */
91 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
96 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
106 struct l2cap_chan
*c
;
108 list_for_each_entry(c
, &conn
->chan_l
, list
) {
115 /* Find channel with given SCID.
116 * Returns locked channel. */
117 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
120 struct l2cap_chan
*c
;
122 mutex_lock(&conn
->chan_lock
);
123 c
= __l2cap_get_chan_by_scid(conn
, cid
);
126 mutex_unlock(&conn
->chan_lock
);
131 /* Find channel with given DCID.
132 * Returns locked channel.
134 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
137 struct l2cap_chan
*c
;
139 mutex_lock(&conn
->chan_lock
);
140 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
143 mutex_unlock(&conn
->chan_lock
);
148 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
151 struct l2cap_chan
*c
;
153 list_for_each_entry(c
, &conn
->chan_l
, list
) {
154 if (c
->ident
== ident
)
160 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
163 struct l2cap_chan
*c
;
165 mutex_lock(&conn
->chan_lock
);
166 c
= __l2cap_get_chan_by_ident(conn
, ident
);
169 mutex_unlock(&conn
->chan_lock
);
174 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
176 struct l2cap_chan
*c
;
178 list_for_each_entry(c
, &chan_list
, global_l
) {
179 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
185 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
189 write_lock(&chan_list_lock
);
191 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
201 u16 p
, start
, end
, incr
;
203 if (chan
->src_type
== BDADDR_BREDR
) {
204 start
= L2CAP_PSM_DYN_START
;
205 end
= L2CAP_PSM_AUTO_END
;
208 start
= L2CAP_PSM_LE_DYN_START
;
209 end
= L2CAP_PSM_LE_DYN_END
;
214 for (p
= start
; p
<= end
; p
+= incr
)
215 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
216 chan
->psm
= cpu_to_le16(p
);
217 chan
->sport
= cpu_to_le16(p
);
224 write_unlock(&chan_list_lock
);
227 EXPORT_SYMBOL_GPL(l2cap_add_psm
);
229 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
231 write_lock(&chan_list_lock
);
233 /* Override the defaults (which are for conn-oriented) */
234 chan
->omtu
= L2CAP_DEFAULT_MTU
;
235 chan
->chan_type
= L2CAP_CHAN_FIXED
;
239 write_unlock(&chan_list_lock
);
244 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
248 if (conn
->hcon
->type
== LE_LINK
)
249 dyn_end
= L2CAP_CID_LE_DYN_END
;
251 dyn_end
= L2CAP_CID_DYN_END
;
253 for (cid
= L2CAP_CID_DYN_START
; cid
<= dyn_end
; cid
++) {
254 if (!__l2cap_get_chan_by_scid(conn
, cid
))
261 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
263 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
264 state_to_string(state
));
267 chan
->ops
->state_change(chan
, state
, 0);
270 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
274 chan
->ops
->state_change(chan
, chan
->state
, err
);
277 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
279 chan
->ops
->state_change(chan
, chan
->state
, err
);
282 static void __set_retrans_timer(struct l2cap_chan
*chan
)
284 if (!delayed_work_pending(&chan
->monitor_timer
) &&
285 chan
->retrans_timeout
) {
286 l2cap_set_timer(chan
, &chan
->retrans_timer
,
287 msecs_to_jiffies(chan
->retrans_timeout
));
291 static void __set_monitor_timer(struct l2cap_chan
*chan
)
293 __clear_retrans_timer(chan
);
294 if (chan
->monitor_timeout
) {
295 l2cap_set_timer(chan
, &chan
->monitor_timer
,
296 msecs_to_jiffies(chan
->monitor_timeout
));
300 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
305 skb_queue_walk(head
, skb
) {
306 if (bt_cb(skb
)->l2cap
.txseq
== seq
)
313 /* ---- L2CAP sequence number lists ---- */
315 /* For ERTM, ordered lists of sequence numbers must be tracked for
316 * SREJ requests that are received and for frames that are to be
317 * retransmitted. These seq_list functions implement a singly-linked
318 * list in an array, where membership in the list can also be checked
319 * in constant time. Items can also be added to the tail of the list
320 * and removed from the head in constant time, without further memory
324 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
326 size_t alloc_size
, i
;
328 /* Allocated size is a power of 2 to map sequence numbers
329 * (which may be up to 14 bits) in to a smaller array that is
330 * sized for the negotiated ERTM transmit windows.
332 alloc_size
= roundup_pow_of_two(size
);
334 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
338 seq_list
->mask
= alloc_size
- 1;
339 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
340 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
341 for (i
= 0; i
< alloc_size
; i
++)
342 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
347 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
349 kfree(seq_list
->list
);
352 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
355 /* Constant-time check for list membership */
356 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
359 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
361 u16 seq
= seq_list
->head
;
362 u16 mask
= seq_list
->mask
;
364 seq_list
->head
= seq_list
->list
[seq
& mask
];
365 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
367 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
368 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
369 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
375 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
379 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
382 for (i
= 0; i
<= seq_list
->mask
; i
++)
383 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
385 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
386 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
389 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
391 u16 mask
= seq_list
->mask
;
393 /* All appends happen in constant time */
395 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
398 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
399 seq_list
->head
= seq
;
401 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
403 seq_list
->tail
= seq
;
404 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
407 static void l2cap_chan_timeout(struct work_struct
*work
)
409 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
411 struct l2cap_conn
*conn
= chan
->conn
;
414 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
416 mutex_lock(&conn
->chan_lock
);
417 l2cap_chan_lock(chan
);
419 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
420 reason
= ECONNREFUSED
;
421 else if (chan
->state
== BT_CONNECT
&&
422 chan
->sec_level
!= BT_SECURITY_SDP
)
423 reason
= ECONNREFUSED
;
427 l2cap_chan_close(chan
, reason
);
429 l2cap_chan_unlock(chan
);
431 chan
->ops
->close(chan
);
432 mutex_unlock(&conn
->chan_lock
);
434 l2cap_chan_put(chan
);
437 struct l2cap_chan
*l2cap_chan_create(void)
439 struct l2cap_chan
*chan
;
441 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
445 mutex_init(&chan
->lock
);
447 /* Set default lock nesting level */
448 atomic_set(&chan
->nesting
, L2CAP_NESTING_NORMAL
);
450 write_lock(&chan_list_lock
);
451 list_add(&chan
->global_l
, &chan_list
);
452 write_unlock(&chan_list_lock
);
454 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
456 chan
->state
= BT_OPEN
;
458 kref_init(&chan
->kref
);
460 /* This flag is cleared in l2cap_chan_ready() */
461 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
463 BT_DBG("chan %p", chan
);
467 EXPORT_SYMBOL_GPL(l2cap_chan_create
);
469 static void l2cap_chan_destroy(struct kref
*kref
)
471 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
473 BT_DBG("chan %p", chan
);
475 write_lock(&chan_list_lock
);
476 list_del(&chan
->global_l
);
477 write_unlock(&chan_list_lock
);
482 void l2cap_chan_hold(struct l2cap_chan
*c
)
484 BT_DBG("chan %p orig refcnt %d", c
, kref_read(&c
->kref
));
489 void l2cap_chan_put(struct l2cap_chan
*c
)
491 BT_DBG("chan %p orig refcnt %d", c
, kref_read(&c
->kref
));
493 kref_put(&c
->kref
, l2cap_chan_destroy
);
495 EXPORT_SYMBOL_GPL(l2cap_chan_put
);
497 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
499 chan
->fcs
= L2CAP_FCS_CRC16
;
500 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
501 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
502 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
503 chan
->remote_max_tx
= chan
->max_tx
;
504 chan
->remote_tx_win
= chan
->tx_win
;
505 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
506 chan
->sec_level
= BT_SECURITY_LOW
;
507 chan
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
508 chan
->retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
509 chan
->monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
510 chan
->conf_state
= 0;
512 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
514 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults
);
516 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
519 chan
->sdu_last_frag
= NULL
;
521 chan
->tx_credits
= 0;
522 chan
->rx_credits
= le_max_credits
;
523 chan
->mps
= min_t(u16
, chan
->imtu
, le_default_mps
);
525 skb_queue_head_init(&chan
->tx_q
);
528 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
530 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
531 __le16_to_cpu(chan
->psm
), chan
->dcid
);
533 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
537 switch (chan
->chan_type
) {
538 case L2CAP_CHAN_CONN_ORIENTED
:
539 /* Alloc CID for connection-oriented socket */
540 chan
->scid
= l2cap_alloc_cid(conn
);
541 if (conn
->hcon
->type
== ACL_LINK
)
542 chan
->omtu
= L2CAP_DEFAULT_MTU
;
545 case L2CAP_CHAN_CONN_LESS
:
546 /* Connectionless socket */
547 chan
->scid
= L2CAP_CID_CONN_LESS
;
548 chan
->dcid
= L2CAP_CID_CONN_LESS
;
549 chan
->omtu
= L2CAP_DEFAULT_MTU
;
552 case L2CAP_CHAN_FIXED
:
553 /* Caller will set CID and CID specific MTU values */
557 /* Raw socket can send/recv signalling messages only */
558 chan
->scid
= L2CAP_CID_SIGNALING
;
559 chan
->dcid
= L2CAP_CID_SIGNALING
;
560 chan
->omtu
= L2CAP_DEFAULT_MTU
;
563 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
564 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
565 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
566 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
567 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
568 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
570 l2cap_chan_hold(chan
);
572 /* Only keep a reference for fixed channels if they requested it */
573 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
574 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
575 hci_conn_hold(conn
->hcon
);
577 list_add(&chan
->list
, &conn
->chan_l
);
580 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
582 mutex_lock(&conn
->chan_lock
);
583 __l2cap_chan_add(conn
, chan
);
584 mutex_unlock(&conn
->chan_lock
);
587 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
589 struct l2cap_conn
*conn
= chan
->conn
;
591 __clear_chan_timer(chan
);
593 BT_DBG("chan %p, conn %p, err %d, state %s", chan
, conn
, err
,
594 state_to_string(chan
->state
));
596 chan
->ops
->teardown(chan
, err
);
599 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
600 /* Delete from channel list */
601 list_del(&chan
->list
);
603 l2cap_chan_put(chan
);
607 /* Reference was only held for non-fixed channels or
608 * fixed channels that explicitly requested it using the
609 * FLAG_HOLD_HCI_CONN flag.
611 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
612 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
613 hci_conn_drop(conn
->hcon
);
615 if (mgr
&& mgr
->bredr_chan
== chan
)
616 mgr
->bredr_chan
= NULL
;
619 if (chan
->hs_hchan
) {
620 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
622 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
623 amp_disconnect_logical_link(hs_hchan
);
626 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
630 case L2CAP_MODE_BASIC
:
633 case L2CAP_MODE_LE_FLOWCTL
:
634 skb_queue_purge(&chan
->tx_q
);
637 case L2CAP_MODE_ERTM
:
638 __clear_retrans_timer(chan
);
639 __clear_monitor_timer(chan
);
640 __clear_ack_timer(chan
);
642 skb_queue_purge(&chan
->srej_q
);
644 l2cap_seq_list_free(&chan
->srej_list
);
645 l2cap_seq_list_free(&chan
->retrans_list
);
649 case L2CAP_MODE_STREAMING
:
650 skb_queue_purge(&chan
->tx_q
);
656 EXPORT_SYMBOL_GPL(l2cap_chan_del
);
658 static void l2cap_conn_update_id_addr(struct work_struct
*work
)
660 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
661 id_addr_update_work
);
662 struct hci_conn
*hcon
= conn
->hcon
;
663 struct l2cap_chan
*chan
;
665 mutex_lock(&conn
->chan_lock
);
667 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
668 l2cap_chan_lock(chan
);
669 bacpy(&chan
->dst
, &hcon
->dst
);
670 chan
->dst_type
= bdaddr_dst_type(hcon
);
671 l2cap_chan_unlock(chan
);
674 mutex_unlock(&conn
->chan_lock
);
677 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
679 struct l2cap_conn
*conn
= chan
->conn
;
680 struct l2cap_le_conn_rsp rsp
;
683 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
684 result
= L2CAP_CR_AUTHORIZATION
;
686 result
= L2CAP_CR_BAD_PSM
;
688 l2cap_state_change(chan
, BT_DISCONN
);
690 rsp
.dcid
= cpu_to_le16(chan
->scid
);
691 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
692 rsp
.mps
= cpu_to_le16(chan
->mps
);
693 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
694 rsp
.result
= cpu_to_le16(result
);
696 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
700 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
702 struct l2cap_conn
*conn
= chan
->conn
;
703 struct l2cap_conn_rsp rsp
;
706 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
707 result
= L2CAP_CR_SEC_BLOCK
;
709 result
= L2CAP_CR_BAD_PSM
;
711 l2cap_state_change(chan
, BT_DISCONN
);
713 rsp
.scid
= cpu_to_le16(chan
->dcid
);
714 rsp
.dcid
= cpu_to_le16(chan
->scid
);
715 rsp
.result
= cpu_to_le16(result
);
716 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
718 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
721 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
723 struct l2cap_conn
*conn
= chan
->conn
;
725 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
727 switch (chan
->state
) {
729 chan
->ops
->teardown(chan
, 0);
734 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
735 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
736 l2cap_send_disconn_req(chan
, reason
);
738 l2cap_chan_del(chan
, reason
);
742 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
743 if (conn
->hcon
->type
== ACL_LINK
)
744 l2cap_chan_connect_reject(chan
);
745 else if (conn
->hcon
->type
== LE_LINK
)
746 l2cap_chan_le_connect_reject(chan
);
749 l2cap_chan_del(chan
, reason
);
754 l2cap_chan_del(chan
, reason
);
758 chan
->ops
->teardown(chan
, 0);
762 EXPORT_SYMBOL(l2cap_chan_close
);
764 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
766 switch (chan
->chan_type
) {
768 switch (chan
->sec_level
) {
769 case BT_SECURITY_HIGH
:
770 case BT_SECURITY_FIPS
:
771 return HCI_AT_DEDICATED_BONDING_MITM
;
772 case BT_SECURITY_MEDIUM
:
773 return HCI_AT_DEDICATED_BONDING
;
775 return HCI_AT_NO_BONDING
;
778 case L2CAP_CHAN_CONN_LESS
:
779 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_3DSP
)) {
780 if (chan
->sec_level
== BT_SECURITY_LOW
)
781 chan
->sec_level
= BT_SECURITY_SDP
;
783 if (chan
->sec_level
== BT_SECURITY_HIGH
||
784 chan
->sec_level
== BT_SECURITY_FIPS
)
785 return HCI_AT_NO_BONDING_MITM
;
787 return HCI_AT_NO_BONDING
;
789 case L2CAP_CHAN_CONN_ORIENTED
:
790 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_SDP
)) {
791 if (chan
->sec_level
== BT_SECURITY_LOW
)
792 chan
->sec_level
= BT_SECURITY_SDP
;
794 if (chan
->sec_level
== BT_SECURITY_HIGH
||
795 chan
->sec_level
== BT_SECURITY_FIPS
)
796 return HCI_AT_NO_BONDING_MITM
;
798 return HCI_AT_NO_BONDING
;
802 switch (chan
->sec_level
) {
803 case BT_SECURITY_HIGH
:
804 case BT_SECURITY_FIPS
:
805 return HCI_AT_GENERAL_BONDING_MITM
;
806 case BT_SECURITY_MEDIUM
:
807 return HCI_AT_GENERAL_BONDING
;
809 return HCI_AT_NO_BONDING
;
815 /* Service level security */
816 int l2cap_chan_check_security(struct l2cap_chan
*chan
, bool initiator
)
818 struct l2cap_conn
*conn
= chan
->conn
;
821 if (conn
->hcon
->type
== LE_LINK
)
822 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
824 auth_type
= l2cap_get_auth_type(chan
);
826 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
,
830 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
834 /* Get next available identificator.
835 * 1 - 128 are used by kernel.
836 * 129 - 199 are reserved.
837 * 200 - 254 are used by utilities like l2ping, etc.
840 mutex_lock(&conn
->ident_lock
);
842 if (++conn
->tx_ident
> 128)
847 mutex_unlock(&conn
->ident_lock
);
852 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
855 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
858 BT_DBG("code 0x%2.2x", code
);
863 /* Use NO_FLUSH if supported or we have an LE link (which does
864 * not support auto-flushing packets) */
865 if (lmp_no_flush_capable(conn
->hcon
->hdev
) ||
866 conn
->hcon
->type
== LE_LINK
)
867 flags
= ACL_START_NO_FLUSH
;
871 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
872 skb
->priority
= HCI_PRIO_MAX
;
874 hci_send_acl(conn
->hchan
, skb
, flags
);
877 static bool __chan_is_moving(struct l2cap_chan
*chan
)
879 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
880 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
883 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
885 struct hci_conn
*hcon
= chan
->conn
->hcon
;
888 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
891 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
893 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
900 /* Use NO_FLUSH for LE links (where this is the only option) or
901 * if the BR/EDR link supports it and flushing has not been
902 * explicitly requested (through FLAG_FLUSHABLE).
904 if (hcon
->type
== LE_LINK
||
905 (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
906 lmp_no_flush_capable(hcon
->hdev
)))
907 flags
= ACL_START_NO_FLUSH
;
911 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
912 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
915 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
917 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
918 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
920 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
923 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
924 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
931 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
932 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
939 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
941 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
942 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
944 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
947 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
948 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
955 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
956 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
963 static inline void __unpack_control(struct l2cap_chan
*chan
,
966 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
967 __unpack_extended_control(get_unaligned_le32(skb
->data
),
969 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
971 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
973 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
977 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
981 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
982 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
984 if (control
->sframe
) {
985 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
986 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
987 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
989 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
990 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
996 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
1000 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1001 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
1003 if (control
->sframe
) {
1004 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
1005 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
1006 packed
|= L2CAP_CTRL_FRAME_TYPE
;
1008 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
1009 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1015 static inline void __pack_control(struct l2cap_chan
*chan
,
1016 struct l2cap_ctrl
*control
,
1017 struct sk_buff
*skb
)
1019 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1020 put_unaligned_le32(__pack_extended_control(control
),
1021 skb
->data
+ L2CAP_HDR_SIZE
);
1023 put_unaligned_le16(__pack_enhanced_control(control
),
1024 skb
->data
+ L2CAP_HDR_SIZE
);
1028 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
1030 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1031 return L2CAP_EXT_HDR_SIZE
;
1033 return L2CAP_ENH_HDR_SIZE
;
1036 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
1039 struct sk_buff
*skb
;
1040 struct l2cap_hdr
*lh
;
1041 int hlen
= __ertm_hdr_size(chan
);
1043 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1044 hlen
+= L2CAP_FCS_SIZE
;
1046 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1049 return ERR_PTR(-ENOMEM
);
1051 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
1052 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1053 lh
->cid
= cpu_to_le16(chan
->dcid
);
1055 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1056 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1058 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1060 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1061 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1062 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1065 skb
->priority
= HCI_PRIO_MAX
;
1069 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1070 struct l2cap_ctrl
*control
)
1072 struct sk_buff
*skb
;
1075 BT_DBG("chan %p, control %p", chan
, control
);
1077 if (!control
->sframe
)
1080 if (__chan_is_moving(chan
))
1083 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1087 if (control
->super
== L2CAP_SUPER_RR
)
1088 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1089 else if (control
->super
== L2CAP_SUPER_RNR
)
1090 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1092 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1093 chan
->last_acked_seq
= control
->reqseq
;
1094 __clear_ack_timer(chan
);
1097 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1098 control
->final
, control
->poll
, control
->super
);
1100 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1101 control_field
= __pack_extended_control(control
);
1103 control_field
= __pack_enhanced_control(control
);
1105 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1107 l2cap_do_send(chan
, skb
);
1110 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1112 struct l2cap_ctrl control
;
1114 BT_DBG("chan %p, poll %d", chan
, poll
);
1116 memset(&control
, 0, sizeof(control
));
1118 control
.poll
= poll
;
1120 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1121 control
.super
= L2CAP_SUPER_RNR
;
1123 control
.super
= L2CAP_SUPER_RR
;
1125 control
.reqseq
= chan
->buffer_seq
;
1126 l2cap_send_sframe(chan
, &control
);
1129 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1131 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
1134 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1137 static bool __amp_capable(struct l2cap_chan
*chan
)
1139 struct l2cap_conn
*conn
= chan
->conn
;
1140 struct hci_dev
*hdev
;
1141 bool amp_available
= false;
1143 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
1146 if (!(conn
->remote_fixed_chan
& L2CAP_FC_A2MP
))
1149 read_lock(&hci_dev_list_lock
);
1150 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1151 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1152 test_bit(HCI_UP
, &hdev
->flags
)) {
1153 amp_available
= true;
1157 read_unlock(&hci_dev_list_lock
);
1159 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1160 return amp_available
;
1165 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1167 /* Check EFS parameters */
1171 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1173 struct l2cap_conn
*conn
= chan
->conn
;
1174 struct l2cap_conn_req req
;
1176 req
.scid
= cpu_to_le16(chan
->scid
);
1177 req
.psm
= chan
->psm
;
1179 chan
->ident
= l2cap_get_ident(conn
);
1181 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1183 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1186 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1188 struct l2cap_create_chan_req req
;
1189 req
.scid
= cpu_to_le16(chan
->scid
);
1190 req
.psm
= chan
->psm
;
1191 req
.amp_id
= amp_id
;
1193 chan
->ident
= l2cap_get_ident(chan
->conn
);
1195 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1199 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1201 struct sk_buff
*skb
;
1203 BT_DBG("chan %p", chan
);
1205 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1208 __clear_retrans_timer(chan
);
1209 __clear_monitor_timer(chan
);
1210 __clear_ack_timer(chan
);
1212 chan
->retry_count
= 0;
1213 skb_queue_walk(&chan
->tx_q
, skb
) {
1214 if (bt_cb(skb
)->l2cap
.retries
)
1215 bt_cb(skb
)->l2cap
.retries
= 1;
1220 chan
->expected_tx_seq
= chan
->buffer_seq
;
1222 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1223 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1224 l2cap_seq_list_clear(&chan
->retrans_list
);
1225 l2cap_seq_list_clear(&chan
->srej_list
);
1226 skb_queue_purge(&chan
->srej_q
);
1228 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1229 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1231 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1234 static void l2cap_move_done(struct l2cap_chan
*chan
)
1236 u8 move_role
= chan
->move_role
;
1237 BT_DBG("chan %p", chan
);
1239 chan
->move_state
= L2CAP_MOVE_STABLE
;
1240 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1242 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1245 switch (move_role
) {
1246 case L2CAP_MOVE_ROLE_INITIATOR
:
1247 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1248 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1250 case L2CAP_MOVE_ROLE_RESPONDER
:
1251 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1256 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1258 /* The channel may have already been flagged as connected in
1259 * case of receiving data before the L2CAP info req/rsp
1260 * procedure is complete.
1262 if (chan
->state
== BT_CONNECTED
)
1265 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1266 chan
->conf_state
= 0;
1267 __clear_chan_timer(chan
);
1269 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1270 chan
->ops
->suspend(chan
);
1272 chan
->state
= BT_CONNECTED
;
1274 chan
->ops
->ready(chan
);
1277 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1279 struct l2cap_conn
*conn
= chan
->conn
;
1280 struct l2cap_le_conn_req req
;
1282 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1285 req
.psm
= chan
->psm
;
1286 req
.scid
= cpu_to_le16(chan
->scid
);
1287 req
.mtu
= cpu_to_le16(chan
->imtu
);
1288 req
.mps
= cpu_to_le16(chan
->mps
);
1289 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1291 chan
->ident
= l2cap_get_ident(conn
);
1293 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1297 static void l2cap_le_start(struct l2cap_chan
*chan
)
1299 struct l2cap_conn
*conn
= chan
->conn
;
1301 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1305 l2cap_chan_ready(chan
);
1309 if (chan
->state
== BT_CONNECT
)
1310 l2cap_le_connect(chan
);
1313 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1315 if (__amp_capable(chan
)) {
1316 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1317 a2mp_discover_amp(chan
);
1318 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1319 l2cap_le_start(chan
);
1321 l2cap_send_conn_req(chan
);
1325 static void l2cap_request_info(struct l2cap_conn
*conn
)
1327 struct l2cap_info_req req
;
1329 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1332 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1334 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1335 conn
->info_ident
= l2cap_get_ident(conn
);
1337 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1339 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1343 static void l2cap_do_start(struct l2cap_chan
*chan
)
1345 struct l2cap_conn
*conn
= chan
->conn
;
1347 if (conn
->hcon
->type
== LE_LINK
) {
1348 l2cap_le_start(chan
);
1352 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)) {
1353 l2cap_request_info(conn
);
1357 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1360 if (l2cap_chan_check_security(chan
, true) &&
1361 __l2cap_no_conn_pending(chan
))
1362 l2cap_start_connection(chan
);
1365 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1367 u32 local_feat_mask
= l2cap_feat_mask
;
1369 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1372 case L2CAP_MODE_ERTM
:
1373 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1374 case L2CAP_MODE_STREAMING
:
1375 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1381 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1383 struct l2cap_conn
*conn
= chan
->conn
;
1384 struct l2cap_disconn_req req
;
1389 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1390 __clear_retrans_timer(chan
);
1391 __clear_monitor_timer(chan
);
1392 __clear_ack_timer(chan
);
1395 if (chan
->scid
== L2CAP_CID_A2MP
) {
1396 l2cap_state_change(chan
, BT_DISCONN
);
1400 req
.dcid
= cpu_to_le16(chan
->dcid
);
1401 req
.scid
= cpu_to_le16(chan
->scid
);
1402 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1405 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1408 /* ---- L2CAP connections ---- */
1409 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1411 struct l2cap_chan
*chan
, *tmp
;
1413 BT_DBG("conn %p", conn
);
1415 mutex_lock(&conn
->chan_lock
);
1417 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1418 l2cap_chan_lock(chan
);
1420 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1421 l2cap_chan_ready(chan
);
1422 l2cap_chan_unlock(chan
);
1426 if (chan
->state
== BT_CONNECT
) {
1427 if (!l2cap_chan_check_security(chan
, true) ||
1428 !__l2cap_no_conn_pending(chan
)) {
1429 l2cap_chan_unlock(chan
);
1433 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1434 && test_bit(CONF_STATE2_DEVICE
,
1435 &chan
->conf_state
)) {
1436 l2cap_chan_close(chan
, ECONNRESET
);
1437 l2cap_chan_unlock(chan
);
1441 l2cap_start_connection(chan
);
1443 } else if (chan
->state
== BT_CONNECT2
) {
1444 struct l2cap_conn_rsp rsp
;
1446 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1447 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1449 if (l2cap_chan_check_security(chan
, false)) {
1450 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1451 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1452 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1453 chan
->ops
->defer(chan
);
1456 l2cap_state_change(chan
, BT_CONFIG
);
1457 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1458 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1461 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1462 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1465 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1468 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1469 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1470 l2cap_chan_unlock(chan
);
1474 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1475 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1476 l2cap_build_conf_req(chan
, buf
), buf
);
1477 chan
->num_conf_req
++;
1480 l2cap_chan_unlock(chan
);
1483 mutex_unlock(&conn
->chan_lock
);
1486 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1488 struct hci_conn
*hcon
= conn
->hcon
;
1489 struct hci_dev
*hdev
= hcon
->hdev
;
1491 BT_DBG("%s conn %p", hdev
->name
, conn
);
1493 /* For outgoing pairing which doesn't necessarily have an
1494 * associated socket (e.g. mgmt_pair_device).
1497 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1499 /* For LE slave connections, make sure the connection interval
1500 * is in the range of the minium and maximum interval that has
1501 * been configured for this connection. If not, then trigger
1502 * the connection update procedure.
1504 if (hcon
->role
== HCI_ROLE_SLAVE
&&
1505 (hcon
->le_conn_interval
< hcon
->le_conn_min_interval
||
1506 hcon
->le_conn_interval
> hcon
->le_conn_max_interval
)) {
1507 struct l2cap_conn_param_update_req req
;
1509 req
.min
= cpu_to_le16(hcon
->le_conn_min_interval
);
1510 req
.max
= cpu_to_le16(hcon
->le_conn_max_interval
);
1511 req
.latency
= cpu_to_le16(hcon
->le_conn_latency
);
1512 req
.to_multiplier
= cpu_to_le16(hcon
->le_supv_timeout
);
1514 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1515 L2CAP_CONN_PARAM_UPDATE_REQ
, sizeof(req
), &req
);
1519 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1521 struct l2cap_chan
*chan
;
1522 struct hci_conn
*hcon
= conn
->hcon
;
1524 BT_DBG("conn %p", conn
);
1526 if (hcon
->type
== ACL_LINK
)
1527 l2cap_request_info(conn
);
1529 mutex_lock(&conn
->chan_lock
);
1531 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1533 l2cap_chan_lock(chan
);
1535 if (chan
->scid
== L2CAP_CID_A2MP
) {
1536 l2cap_chan_unlock(chan
);
1540 if (hcon
->type
== LE_LINK
) {
1541 l2cap_le_start(chan
);
1542 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1543 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
1544 l2cap_chan_ready(chan
);
1545 } else if (chan
->state
== BT_CONNECT
) {
1546 l2cap_do_start(chan
);
1549 l2cap_chan_unlock(chan
);
1552 mutex_unlock(&conn
->chan_lock
);
1554 if (hcon
->type
== LE_LINK
)
1555 l2cap_le_conn_ready(conn
);
1557 queue_work(hcon
->hdev
->workqueue
, &conn
->pending_rx_work
);
1560 /* Notify sockets that we cannot guaranty reliability anymore */
1561 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1563 struct l2cap_chan
*chan
;
1565 BT_DBG("conn %p", conn
);
1567 mutex_lock(&conn
->chan_lock
);
1569 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1570 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1571 l2cap_chan_set_err(chan
, err
);
1574 mutex_unlock(&conn
->chan_lock
);
1577 static void l2cap_info_timeout(struct work_struct
*work
)
1579 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1582 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1583 conn
->info_ident
= 0;
1585 l2cap_conn_start(conn
);
1590 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1591 * callback is called during registration. The ->remove callback is called
1592 * during unregistration.
1593 * An l2cap_user object can either be explicitly unregistered or when the
1594 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1595 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1596 * External modules must own a reference to the l2cap_conn object if they intend
1597 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1598 * any time if they don't.
1601 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1603 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1606 /* We need to check whether l2cap_conn is registered. If it is not, we
1607 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1608 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1609 * relies on the parent hci_conn object to be locked. This itself relies
1610 * on the hci_dev object to be locked. So we must lock the hci device
1615 if (!list_empty(&user
->list
)) {
1620 /* conn->hchan is NULL after l2cap_conn_del() was called */
1626 ret
= user
->probe(conn
, user
);
1630 list_add(&user
->list
, &conn
->users
);
1634 hci_dev_unlock(hdev
);
1637 EXPORT_SYMBOL(l2cap_register_user
);
1639 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1641 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1645 if (list_empty(&user
->list
))
1648 list_del_init(&user
->list
);
1649 user
->remove(conn
, user
);
1652 hci_dev_unlock(hdev
);
1654 EXPORT_SYMBOL(l2cap_unregister_user
);
1656 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1658 struct l2cap_user
*user
;
1660 while (!list_empty(&conn
->users
)) {
1661 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1662 list_del_init(&user
->list
);
1663 user
->remove(conn
, user
);
1667 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1669 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1670 struct l2cap_chan
*chan
, *l
;
1675 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1677 kfree_skb(conn
->rx_skb
);
1679 skb_queue_purge(&conn
->pending_rx
);
1681 /* We can not call flush_work(&conn->pending_rx_work) here since we
1682 * might block if we are running on a worker from the same workqueue
1683 * pending_rx_work is waiting on.
1685 if (work_pending(&conn
->pending_rx_work
))
1686 cancel_work_sync(&conn
->pending_rx_work
);
1688 if (work_pending(&conn
->id_addr_update_work
))
1689 cancel_work_sync(&conn
->id_addr_update_work
);
1691 l2cap_unregister_all_users(conn
);
1693 /* Force the connection to be immediately dropped */
1694 hcon
->disc_timeout
= 0;
1696 mutex_lock(&conn
->chan_lock
);
1699 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1700 l2cap_chan_hold(chan
);
1701 l2cap_chan_lock(chan
);
1703 l2cap_chan_del(chan
, err
);
1705 l2cap_chan_unlock(chan
);
1707 chan
->ops
->close(chan
);
1708 l2cap_chan_put(chan
);
1711 mutex_unlock(&conn
->chan_lock
);
1713 hci_chan_del(conn
->hchan
);
1715 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1716 cancel_delayed_work_sync(&conn
->info_timer
);
1718 hcon
->l2cap_data
= NULL
;
1720 l2cap_conn_put(conn
);
1723 static void l2cap_conn_free(struct kref
*ref
)
1725 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1727 hci_conn_put(conn
->hcon
);
1731 struct l2cap_conn
*l2cap_conn_get(struct l2cap_conn
*conn
)
1733 kref_get(&conn
->ref
);
1736 EXPORT_SYMBOL(l2cap_conn_get
);
1738 void l2cap_conn_put(struct l2cap_conn
*conn
)
1740 kref_put(&conn
->ref
, l2cap_conn_free
);
1742 EXPORT_SYMBOL(l2cap_conn_put
);
1744 /* ---- Socket interface ---- */
1746 /* Find socket with psm and source / destination bdaddr.
1747 * Returns closest match.
1749 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1754 struct l2cap_chan
*c
, *c1
= NULL
;
1756 read_lock(&chan_list_lock
);
1758 list_for_each_entry(c
, &chan_list
, global_l
) {
1759 if (state
&& c
->state
!= state
)
1762 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1765 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1768 if (c
->psm
== psm
) {
1769 int src_match
, dst_match
;
1770 int src_any
, dst_any
;
1773 src_match
= !bacmp(&c
->src
, src
);
1774 dst_match
= !bacmp(&c
->dst
, dst
);
1775 if (src_match
&& dst_match
) {
1777 read_unlock(&chan_list_lock
);
1782 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1783 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1784 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1785 (src_any
&& dst_any
))
1791 l2cap_chan_hold(c1
);
1793 read_unlock(&chan_list_lock
);
1798 static void l2cap_monitor_timeout(struct work_struct
*work
)
1800 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1801 monitor_timer
.work
);
1803 BT_DBG("chan %p", chan
);
1805 l2cap_chan_lock(chan
);
1808 l2cap_chan_unlock(chan
);
1809 l2cap_chan_put(chan
);
1813 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1815 l2cap_chan_unlock(chan
);
1816 l2cap_chan_put(chan
);
1819 static void l2cap_retrans_timeout(struct work_struct
*work
)
1821 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1822 retrans_timer
.work
);
1824 BT_DBG("chan %p", chan
);
1826 l2cap_chan_lock(chan
);
1829 l2cap_chan_unlock(chan
);
1830 l2cap_chan_put(chan
);
1834 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1835 l2cap_chan_unlock(chan
);
1836 l2cap_chan_put(chan
);
1839 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1840 struct sk_buff_head
*skbs
)
1842 struct sk_buff
*skb
;
1843 struct l2cap_ctrl
*control
;
1845 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1847 if (__chan_is_moving(chan
))
1850 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1852 while (!skb_queue_empty(&chan
->tx_q
)) {
1854 skb
= skb_dequeue(&chan
->tx_q
);
1856 bt_cb(skb
)->l2cap
.retries
= 1;
1857 control
= &bt_cb(skb
)->l2cap
;
1859 control
->reqseq
= 0;
1860 control
->txseq
= chan
->next_tx_seq
;
1862 __pack_control(chan
, control
, skb
);
1864 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1865 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1866 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1869 l2cap_do_send(chan
, skb
);
1871 BT_DBG("Sent txseq %u", control
->txseq
);
1873 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1874 chan
->frames_sent
++;
1878 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1880 struct sk_buff
*skb
, *tx_skb
;
1881 struct l2cap_ctrl
*control
;
1884 BT_DBG("chan %p", chan
);
1886 if (chan
->state
!= BT_CONNECTED
)
1889 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1892 if (__chan_is_moving(chan
))
1895 while (chan
->tx_send_head
&&
1896 chan
->unacked_frames
< chan
->remote_tx_win
&&
1897 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1899 skb
= chan
->tx_send_head
;
1901 bt_cb(skb
)->l2cap
.retries
= 1;
1902 control
= &bt_cb(skb
)->l2cap
;
1904 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1907 control
->reqseq
= chan
->buffer_seq
;
1908 chan
->last_acked_seq
= chan
->buffer_seq
;
1909 control
->txseq
= chan
->next_tx_seq
;
1911 __pack_control(chan
, control
, skb
);
1913 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1914 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1915 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1918 /* Clone after data has been modified. Data is assumed to be
1919 read-only (for locking purposes) on cloned sk_buffs.
1921 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1926 __set_retrans_timer(chan
);
1928 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1929 chan
->unacked_frames
++;
1930 chan
->frames_sent
++;
1933 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1934 chan
->tx_send_head
= NULL
;
1936 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1938 l2cap_do_send(chan
, tx_skb
);
1939 BT_DBG("Sent txseq %u", control
->txseq
);
1942 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1943 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1948 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1950 struct l2cap_ctrl control
;
1951 struct sk_buff
*skb
;
1952 struct sk_buff
*tx_skb
;
1955 BT_DBG("chan %p", chan
);
1957 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1960 if (__chan_is_moving(chan
))
1963 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1964 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1966 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1968 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1973 bt_cb(skb
)->l2cap
.retries
++;
1974 control
= bt_cb(skb
)->l2cap
;
1976 if (chan
->max_tx
!= 0 &&
1977 bt_cb(skb
)->l2cap
.retries
> chan
->max_tx
) {
1978 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1979 l2cap_send_disconn_req(chan
, ECONNRESET
);
1980 l2cap_seq_list_clear(&chan
->retrans_list
);
1984 control
.reqseq
= chan
->buffer_seq
;
1985 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1990 if (skb_cloned(skb
)) {
1991 /* Cloned sk_buffs are read-only, so we need a
1994 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1996 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2000 l2cap_seq_list_clear(&chan
->retrans_list
);
2004 /* Update skb contents */
2005 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2006 put_unaligned_le32(__pack_extended_control(&control
),
2007 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2009 put_unaligned_le16(__pack_enhanced_control(&control
),
2010 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2014 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2015 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
,
2016 tx_skb
->len
- L2CAP_FCS_SIZE
);
2017 put_unaligned_le16(fcs
, skb_tail_pointer(tx_skb
) -
2021 l2cap_do_send(chan
, tx_skb
);
2023 BT_DBG("Resent txseq %d", control
.txseq
);
2025 chan
->last_acked_seq
= chan
->buffer_seq
;
2029 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2030 struct l2cap_ctrl
*control
)
2032 BT_DBG("chan %p, control %p", chan
, control
);
2034 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2035 l2cap_ertm_resend(chan
);
2038 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2039 struct l2cap_ctrl
*control
)
2041 struct sk_buff
*skb
;
2043 BT_DBG("chan %p, control %p", chan
, control
);
2046 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2048 l2cap_seq_list_clear(&chan
->retrans_list
);
2050 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2053 if (chan
->unacked_frames
) {
2054 skb_queue_walk(&chan
->tx_q
, skb
) {
2055 if (bt_cb(skb
)->l2cap
.txseq
== control
->reqseq
||
2056 skb
== chan
->tx_send_head
)
2060 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2061 if (skb
== chan
->tx_send_head
)
2064 l2cap_seq_list_append(&chan
->retrans_list
,
2065 bt_cb(skb
)->l2cap
.txseq
);
2068 l2cap_ertm_resend(chan
);
2072 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2074 struct l2cap_ctrl control
;
2075 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2076 chan
->last_acked_seq
);
2079 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2080 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2082 memset(&control
, 0, sizeof(control
));
2085 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2086 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2087 __clear_ack_timer(chan
);
2088 control
.super
= L2CAP_SUPER_RNR
;
2089 control
.reqseq
= chan
->buffer_seq
;
2090 l2cap_send_sframe(chan
, &control
);
2092 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2093 l2cap_ertm_send(chan
);
2094 /* If any i-frames were sent, they included an ack */
2095 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2099 /* Ack now if the window is 3/4ths full.
2100 * Calculate without mul or div
2102 threshold
= chan
->ack_win
;
2103 threshold
+= threshold
<< 1;
2106 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2109 if (frames_to_ack
>= threshold
) {
2110 __clear_ack_timer(chan
);
2111 control
.super
= L2CAP_SUPER_RR
;
2112 control
.reqseq
= chan
->buffer_seq
;
2113 l2cap_send_sframe(chan
, &control
);
2118 __set_ack_timer(chan
);
2122 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2123 struct msghdr
*msg
, int len
,
2124 int count
, struct sk_buff
*skb
)
2126 struct l2cap_conn
*conn
= chan
->conn
;
2127 struct sk_buff
**frag
;
2130 if (!copy_from_iter_full(skb_put(skb
, count
), count
, &msg
->msg_iter
))
2136 /* Continuation fragments (no L2CAP header) */
2137 frag
= &skb_shinfo(skb
)->frag_list
;
2139 struct sk_buff
*tmp
;
2141 count
= min_t(unsigned int, conn
->mtu
, len
);
2143 tmp
= chan
->ops
->alloc_skb(chan
, 0, count
,
2144 msg
->msg_flags
& MSG_DONTWAIT
);
2146 return PTR_ERR(tmp
);
2150 if (!copy_from_iter_full(skb_put(*frag
, count
), count
,
2157 skb
->len
+= (*frag
)->len
;
2158 skb
->data_len
+= (*frag
)->len
;
2160 frag
= &(*frag
)->next
;
2166 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2167 struct msghdr
*msg
, size_t len
)
2169 struct l2cap_conn
*conn
= chan
->conn
;
2170 struct sk_buff
*skb
;
2171 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2172 struct l2cap_hdr
*lh
;
2174 BT_DBG("chan %p psm 0x%2.2x len %zu", chan
,
2175 __le16_to_cpu(chan
->psm
), len
);
2177 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2179 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2180 msg
->msg_flags
& MSG_DONTWAIT
);
2184 /* Create L2CAP header */
2185 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
2186 lh
->cid
= cpu_to_le16(chan
->dcid
);
2187 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2188 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2190 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2191 if (unlikely(err
< 0)) {
2193 return ERR_PTR(err
);
2198 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2199 struct msghdr
*msg
, size_t len
)
2201 struct l2cap_conn
*conn
= chan
->conn
;
2202 struct sk_buff
*skb
;
2204 struct l2cap_hdr
*lh
;
2206 BT_DBG("chan %p len %zu", chan
, len
);
2208 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2210 skb
= chan
->ops
->alloc_skb(chan
, L2CAP_HDR_SIZE
, count
,
2211 msg
->msg_flags
& MSG_DONTWAIT
);
2215 /* Create L2CAP header */
2216 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
2217 lh
->cid
= cpu_to_le16(chan
->dcid
);
2218 lh
->len
= cpu_to_le16(len
);
2220 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2221 if (unlikely(err
< 0)) {
2223 return ERR_PTR(err
);
2228 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2229 struct msghdr
*msg
, size_t len
,
2232 struct l2cap_conn
*conn
= chan
->conn
;
2233 struct sk_buff
*skb
;
2234 int err
, count
, hlen
;
2235 struct l2cap_hdr
*lh
;
2237 BT_DBG("chan %p len %zu", chan
, len
);
2240 return ERR_PTR(-ENOTCONN
);
2242 hlen
= __ertm_hdr_size(chan
);
2245 hlen
+= L2CAP_SDULEN_SIZE
;
2247 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2248 hlen
+= L2CAP_FCS_SIZE
;
2250 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2252 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2253 msg
->msg_flags
& MSG_DONTWAIT
);
2257 /* Create L2CAP header */
2258 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
2259 lh
->cid
= cpu_to_le16(chan
->dcid
);
2260 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2262 /* Control header is populated later */
2263 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2264 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2266 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2269 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2271 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2272 if (unlikely(err
< 0)) {
2274 return ERR_PTR(err
);
2277 bt_cb(skb
)->l2cap
.fcs
= chan
->fcs
;
2278 bt_cb(skb
)->l2cap
.retries
= 0;
2282 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2283 struct sk_buff_head
*seg_queue
,
2284 struct msghdr
*msg
, size_t len
)
2286 struct sk_buff
*skb
;
2291 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2293 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2294 * so fragmented skbs are not used. The HCI layer's handling
2295 * of fragmented skbs is not compatible with ERTM's queueing.
2298 /* PDU size is derived from the HCI MTU */
2299 pdu_len
= chan
->conn
->mtu
;
2301 /* Constrain PDU size for BR/EDR connections */
2303 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2305 /* Adjust for largest possible L2CAP overhead. */
2307 pdu_len
-= L2CAP_FCS_SIZE
;
2309 pdu_len
-= __ertm_hdr_size(chan
);
2311 /* Remote device may have requested smaller PDUs */
2312 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2314 if (len
<= pdu_len
) {
2315 sar
= L2CAP_SAR_UNSEGMENTED
;
2319 sar
= L2CAP_SAR_START
;
2324 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2327 __skb_queue_purge(seg_queue
);
2328 return PTR_ERR(skb
);
2331 bt_cb(skb
)->l2cap
.sar
= sar
;
2332 __skb_queue_tail(seg_queue
, skb
);
2338 if (len
<= pdu_len
) {
2339 sar
= L2CAP_SAR_END
;
2342 sar
= L2CAP_SAR_CONTINUE
;
2349 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2351 size_t len
, u16 sdulen
)
2353 struct l2cap_conn
*conn
= chan
->conn
;
2354 struct sk_buff
*skb
;
2355 int err
, count
, hlen
;
2356 struct l2cap_hdr
*lh
;
2358 BT_DBG("chan %p len %zu", chan
, len
);
2361 return ERR_PTR(-ENOTCONN
);
2363 hlen
= L2CAP_HDR_SIZE
;
2366 hlen
+= L2CAP_SDULEN_SIZE
;
2368 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2370 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2371 msg
->msg_flags
& MSG_DONTWAIT
);
2375 /* Create L2CAP header */
2376 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
2377 lh
->cid
= cpu_to_le16(chan
->dcid
);
2378 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2381 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2383 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2384 if (unlikely(err
< 0)) {
2386 return ERR_PTR(err
);
2392 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2393 struct sk_buff_head
*seg_queue
,
2394 struct msghdr
*msg
, size_t len
)
2396 struct sk_buff
*skb
;
2400 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2403 pdu_len
= chan
->remote_mps
- L2CAP_SDULEN_SIZE
;
2409 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2411 __skb_queue_purge(seg_queue
);
2412 return PTR_ERR(skb
);
2415 __skb_queue_tail(seg_queue
, skb
);
2421 pdu_len
+= L2CAP_SDULEN_SIZE
;
2428 static void l2cap_le_flowctl_send(struct l2cap_chan
*chan
)
2432 BT_DBG("chan %p", chan
);
2434 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2435 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2440 BT_DBG("Sent %d credits %u queued %u", sent
, chan
->tx_credits
,
2441 skb_queue_len(&chan
->tx_q
));
2444 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
2446 struct sk_buff
*skb
;
2448 struct sk_buff_head seg_queue
;
2453 /* Connectionless channel */
2454 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2455 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
2457 return PTR_ERR(skb
);
2459 /* Channel lock is released before requesting new skb and then
2460 * reacquired thus we need to recheck channel state.
2462 if (chan
->state
!= BT_CONNECTED
) {
2467 l2cap_do_send(chan
, skb
);
2471 switch (chan
->mode
) {
2472 case L2CAP_MODE_LE_FLOWCTL
:
2473 /* Check outgoing MTU */
2474 if (len
> chan
->omtu
)
2477 __skb_queue_head_init(&seg_queue
);
2479 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2481 if (chan
->state
!= BT_CONNECTED
) {
2482 __skb_queue_purge(&seg_queue
);
2489 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2491 l2cap_le_flowctl_send(chan
);
2493 if (!chan
->tx_credits
)
2494 chan
->ops
->suspend(chan
);
2500 case L2CAP_MODE_BASIC
:
2501 /* Check outgoing MTU */
2502 if (len
> chan
->omtu
)
2505 /* Create a basic PDU */
2506 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
2508 return PTR_ERR(skb
);
2510 /* Channel lock is released before requesting new skb and then
2511 * reacquired thus we need to recheck channel state.
2513 if (chan
->state
!= BT_CONNECTED
) {
2518 l2cap_do_send(chan
, skb
);
2522 case L2CAP_MODE_ERTM
:
2523 case L2CAP_MODE_STREAMING
:
2524 /* Check outgoing MTU */
2525 if (len
> chan
->omtu
) {
2530 __skb_queue_head_init(&seg_queue
);
2532 /* Do segmentation before calling in to the state machine,
2533 * since it's possible to block while waiting for memory
2536 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2538 /* The channel could have been closed while segmenting,
2539 * check that it is still connected.
2541 if (chan
->state
!= BT_CONNECTED
) {
2542 __skb_queue_purge(&seg_queue
);
2549 if (chan
->mode
== L2CAP_MODE_ERTM
)
2550 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2552 l2cap_streaming_send(chan
, &seg_queue
);
2556 /* If the skbs were not queued for sending, they'll still be in
2557 * seg_queue and need to be purged.
2559 __skb_queue_purge(&seg_queue
);
2563 BT_DBG("bad state %1.1x", chan
->mode
);
2569 EXPORT_SYMBOL_GPL(l2cap_chan_send
);
2571 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2573 struct l2cap_ctrl control
;
2576 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2578 memset(&control
, 0, sizeof(control
));
2580 control
.super
= L2CAP_SUPER_SREJ
;
2582 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2583 seq
= __next_seq(chan
, seq
)) {
2584 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2585 control
.reqseq
= seq
;
2586 l2cap_send_sframe(chan
, &control
);
2587 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2591 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2594 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2596 struct l2cap_ctrl control
;
2598 BT_DBG("chan %p", chan
);
2600 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2603 memset(&control
, 0, sizeof(control
));
2605 control
.super
= L2CAP_SUPER_SREJ
;
2606 control
.reqseq
= chan
->srej_list
.tail
;
2607 l2cap_send_sframe(chan
, &control
);
2610 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2612 struct l2cap_ctrl control
;
2616 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2618 memset(&control
, 0, sizeof(control
));
2620 control
.super
= L2CAP_SUPER_SREJ
;
2622 /* Capture initial list head to allow only one pass through the list. */
2623 initial_head
= chan
->srej_list
.head
;
2626 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2627 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2630 control
.reqseq
= seq
;
2631 l2cap_send_sframe(chan
, &control
);
2632 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2633 } while (chan
->srej_list
.head
!= initial_head
);
2636 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2638 struct sk_buff
*acked_skb
;
2641 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2643 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2646 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2647 chan
->expected_ack_seq
, chan
->unacked_frames
);
2649 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2650 ackseq
= __next_seq(chan
, ackseq
)) {
2652 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2654 skb_unlink(acked_skb
, &chan
->tx_q
);
2655 kfree_skb(acked_skb
);
2656 chan
->unacked_frames
--;
2660 chan
->expected_ack_seq
= reqseq
;
2662 if (chan
->unacked_frames
== 0)
2663 __clear_retrans_timer(chan
);
2665 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2668 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2670 BT_DBG("chan %p", chan
);
2672 chan
->expected_tx_seq
= chan
->buffer_seq
;
2673 l2cap_seq_list_clear(&chan
->srej_list
);
2674 skb_queue_purge(&chan
->srej_q
);
2675 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2678 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2679 struct l2cap_ctrl
*control
,
2680 struct sk_buff_head
*skbs
, u8 event
)
2682 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2686 case L2CAP_EV_DATA_REQUEST
:
2687 if (chan
->tx_send_head
== NULL
)
2688 chan
->tx_send_head
= skb_peek(skbs
);
2690 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2691 l2cap_ertm_send(chan
);
2693 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2694 BT_DBG("Enter LOCAL_BUSY");
2695 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2697 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2698 /* The SREJ_SENT state must be aborted if we are to
2699 * enter the LOCAL_BUSY state.
2701 l2cap_abort_rx_srej_sent(chan
);
2704 l2cap_send_ack(chan
);
2707 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2708 BT_DBG("Exit LOCAL_BUSY");
2709 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2711 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2712 struct l2cap_ctrl local_control
;
2714 memset(&local_control
, 0, sizeof(local_control
));
2715 local_control
.sframe
= 1;
2716 local_control
.super
= L2CAP_SUPER_RR
;
2717 local_control
.poll
= 1;
2718 local_control
.reqseq
= chan
->buffer_seq
;
2719 l2cap_send_sframe(chan
, &local_control
);
2721 chan
->retry_count
= 1;
2722 __set_monitor_timer(chan
);
2723 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2726 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2727 l2cap_process_reqseq(chan
, control
->reqseq
);
2729 case L2CAP_EV_EXPLICIT_POLL
:
2730 l2cap_send_rr_or_rnr(chan
, 1);
2731 chan
->retry_count
= 1;
2732 __set_monitor_timer(chan
);
2733 __clear_ack_timer(chan
);
2734 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2736 case L2CAP_EV_RETRANS_TO
:
2737 l2cap_send_rr_or_rnr(chan
, 1);
2738 chan
->retry_count
= 1;
2739 __set_monitor_timer(chan
);
2740 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2742 case L2CAP_EV_RECV_FBIT
:
2743 /* Nothing to process */
2750 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2751 struct l2cap_ctrl
*control
,
2752 struct sk_buff_head
*skbs
, u8 event
)
2754 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2758 case L2CAP_EV_DATA_REQUEST
:
2759 if (chan
->tx_send_head
== NULL
)
2760 chan
->tx_send_head
= skb_peek(skbs
);
2761 /* Queue data, but don't send. */
2762 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2764 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2765 BT_DBG("Enter LOCAL_BUSY");
2766 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2768 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2769 /* The SREJ_SENT state must be aborted if we are to
2770 * enter the LOCAL_BUSY state.
2772 l2cap_abort_rx_srej_sent(chan
);
2775 l2cap_send_ack(chan
);
2778 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2779 BT_DBG("Exit LOCAL_BUSY");
2780 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2782 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2783 struct l2cap_ctrl local_control
;
2784 memset(&local_control
, 0, sizeof(local_control
));
2785 local_control
.sframe
= 1;
2786 local_control
.super
= L2CAP_SUPER_RR
;
2787 local_control
.poll
= 1;
2788 local_control
.reqseq
= chan
->buffer_seq
;
2789 l2cap_send_sframe(chan
, &local_control
);
2791 chan
->retry_count
= 1;
2792 __set_monitor_timer(chan
);
2793 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2796 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2797 l2cap_process_reqseq(chan
, control
->reqseq
);
2801 case L2CAP_EV_RECV_FBIT
:
2802 if (control
&& control
->final
) {
2803 __clear_monitor_timer(chan
);
2804 if (chan
->unacked_frames
> 0)
2805 __set_retrans_timer(chan
);
2806 chan
->retry_count
= 0;
2807 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2808 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2811 case L2CAP_EV_EXPLICIT_POLL
:
2814 case L2CAP_EV_MONITOR_TO
:
2815 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2816 l2cap_send_rr_or_rnr(chan
, 1);
2817 __set_monitor_timer(chan
);
2818 chan
->retry_count
++;
2820 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2828 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2829 struct sk_buff_head
*skbs
, u8 event
)
2831 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2832 chan
, control
, skbs
, event
, chan
->tx_state
);
2834 switch (chan
->tx_state
) {
2835 case L2CAP_TX_STATE_XMIT
:
2836 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2838 case L2CAP_TX_STATE_WAIT_F
:
2839 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2847 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2848 struct l2cap_ctrl
*control
)
2850 BT_DBG("chan %p, control %p", chan
, control
);
2851 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2854 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2855 struct l2cap_ctrl
*control
)
2857 BT_DBG("chan %p, control %p", chan
, control
);
2858 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2861 /* Copy frame to all raw sockets on that connection */
2862 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2864 struct sk_buff
*nskb
;
2865 struct l2cap_chan
*chan
;
2867 BT_DBG("conn %p", conn
);
2869 mutex_lock(&conn
->chan_lock
);
2871 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2872 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2875 /* Don't send frame to the channel it came from */
2876 if (bt_cb(skb
)->l2cap
.chan
== chan
)
2879 nskb
= skb_clone(skb
, GFP_KERNEL
);
2882 if (chan
->ops
->recv(chan
, nskb
))
2886 mutex_unlock(&conn
->chan_lock
);
2889 /* ---- L2CAP signalling commands ---- */
2890 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2891 u8 ident
, u16 dlen
, void *data
)
2893 struct sk_buff
*skb
, **frag
;
2894 struct l2cap_cmd_hdr
*cmd
;
2895 struct l2cap_hdr
*lh
;
2898 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2899 conn
, code
, ident
, dlen
);
2901 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2904 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2905 count
= min_t(unsigned int, conn
->mtu
, len
);
2907 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2911 lh
= skb_put(skb
, L2CAP_HDR_SIZE
);
2912 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2914 if (conn
->hcon
->type
== LE_LINK
)
2915 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2917 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2919 cmd
= skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2922 cmd
->len
= cpu_to_le16(dlen
);
2925 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2926 skb_put_data(skb
, data
, count
);
2932 /* Continuation fragments (no L2CAP header) */
2933 frag
= &skb_shinfo(skb
)->frag_list
;
2935 count
= min_t(unsigned int, conn
->mtu
, len
);
2937 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2941 skb_put_data(*frag
, data
, count
);
2946 frag
= &(*frag
)->next
;
2956 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2959 struct l2cap_conf_opt
*opt
= *ptr
;
2962 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2970 *val
= *((u8
*) opt
->val
);
2974 *val
= get_unaligned_le16(opt
->val
);
2978 *val
= get_unaligned_le32(opt
->val
);
2982 *val
= (unsigned long) opt
->val
;
2986 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2990 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2992 struct l2cap_conf_opt
*opt
= *ptr
;
2994 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
3001 *((u8
*) opt
->val
) = val
;
3005 put_unaligned_le16(val
, opt
->val
);
3009 put_unaligned_le32(val
, opt
->val
);
3013 memcpy(opt
->val
, (void *) val
, len
);
3017 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
3020 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
3022 struct l2cap_conf_efs efs
;
3024 switch (chan
->mode
) {
3025 case L2CAP_MODE_ERTM
:
3026 efs
.id
= chan
->local_id
;
3027 efs
.stype
= chan
->local_stype
;
3028 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3029 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3030 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
3031 efs
.flush_to
= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3034 case L2CAP_MODE_STREAMING
:
3036 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3037 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3038 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3047 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3048 (unsigned long) &efs
);
3051 static void l2cap_ack_timeout(struct work_struct
*work
)
3053 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3057 BT_DBG("chan %p", chan
);
3059 l2cap_chan_lock(chan
);
3061 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3062 chan
->last_acked_seq
);
3065 l2cap_send_rr_or_rnr(chan
, 0);
3067 l2cap_chan_unlock(chan
);
3068 l2cap_chan_put(chan
);
3071 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3075 chan
->next_tx_seq
= 0;
3076 chan
->expected_tx_seq
= 0;
3077 chan
->expected_ack_seq
= 0;
3078 chan
->unacked_frames
= 0;
3079 chan
->buffer_seq
= 0;
3080 chan
->frames_sent
= 0;
3081 chan
->last_acked_seq
= 0;
3083 chan
->sdu_last_frag
= NULL
;
3086 skb_queue_head_init(&chan
->tx_q
);
3088 chan
->local_amp_id
= AMP_ID_BREDR
;
3089 chan
->move_id
= AMP_ID_BREDR
;
3090 chan
->move_state
= L2CAP_MOVE_STABLE
;
3091 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3093 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3096 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3097 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3099 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3100 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3101 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3103 skb_queue_head_init(&chan
->srej_q
);
3105 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3109 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3111 l2cap_seq_list_free(&chan
->srej_list
);
3116 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3119 case L2CAP_MODE_STREAMING
:
3120 case L2CAP_MODE_ERTM
:
3121 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3125 return L2CAP_MODE_BASIC
;
3129 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3131 return ((conn
->local_fixed_chan
& L2CAP_FC_A2MP
) &&
3132 (conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
));
3135 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3137 return ((conn
->local_fixed_chan
& L2CAP_FC_A2MP
) &&
3138 (conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
));
3141 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3142 struct l2cap_conf_rfc
*rfc
)
3144 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3145 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3147 /* Class 1 devices have must have ERTM timeouts
3148 * exceeding the Link Supervision Timeout. The
3149 * default Link Supervision Timeout for AMP
3150 * controllers is 10 seconds.
3152 * Class 1 devices use 0xffffffff for their
3153 * best-effort flush timeout, so the clamping logic
3154 * will result in a timeout that meets the above
3155 * requirement. ERTM timeouts are 16-bit values, so
3156 * the maximum timeout is 65.535 seconds.
3159 /* Convert timeout to milliseconds and round */
3160 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3162 /* This is the recommended formula for class 2 devices
3163 * that start ERTM timers when packets are sent to the
3166 ertm_to
= 3 * ertm_to
+ 500;
3168 if (ertm_to
> 0xffff)
3171 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3172 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3174 rfc
->retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3175 rfc
->monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3179 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3181 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3182 __l2cap_ews_supported(chan
->conn
)) {
3183 /* use extended control field */
3184 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3185 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3187 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3188 L2CAP_DEFAULT_TX_WINDOW
);
3189 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3191 chan
->ack_win
= chan
->tx_win
;
3194 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3196 struct l2cap_conf_req
*req
= data
;
3197 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3198 void *ptr
= req
->data
;
3201 BT_DBG("chan %p", chan
);
3203 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3206 switch (chan
->mode
) {
3207 case L2CAP_MODE_STREAMING
:
3208 case L2CAP_MODE_ERTM
:
3209 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3212 if (__l2cap_efs_supported(chan
->conn
))
3213 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3217 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3222 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3223 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3225 switch (chan
->mode
) {
3226 case L2CAP_MODE_BASIC
:
3230 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3231 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3234 rfc
.mode
= L2CAP_MODE_BASIC
;
3236 rfc
.max_transmit
= 0;
3237 rfc
.retrans_timeout
= 0;
3238 rfc
.monitor_timeout
= 0;
3239 rfc
.max_pdu_size
= 0;
3241 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3242 (unsigned long) &rfc
);
3245 case L2CAP_MODE_ERTM
:
3246 rfc
.mode
= L2CAP_MODE_ERTM
;
3247 rfc
.max_transmit
= chan
->max_tx
;
3249 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3251 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3252 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3254 rfc
.max_pdu_size
= cpu_to_le16(size
);
3256 l2cap_txwin_setup(chan
);
3258 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3259 L2CAP_DEFAULT_TX_WINDOW
);
3261 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3262 (unsigned long) &rfc
);
3264 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3265 l2cap_add_opt_efs(&ptr
, chan
);
3267 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3268 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3271 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3272 if (chan
->fcs
== L2CAP_FCS_NONE
||
3273 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3274 chan
->fcs
= L2CAP_FCS_NONE
;
3275 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3280 case L2CAP_MODE_STREAMING
:
3281 l2cap_txwin_setup(chan
);
3282 rfc
.mode
= L2CAP_MODE_STREAMING
;
3284 rfc
.max_transmit
= 0;
3285 rfc
.retrans_timeout
= 0;
3286 rfc
.monitor_timeout
= 0;
3288 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3289 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3291 rfc
.max_pdu_size
= cpu_to_le16(size
);
3293 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3294 (unsigned long) &rfc
);
3296 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3297 l2cap_add_opt_efs(&ptr
, chan
);
3299 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3300 if (chan
->fcs
== L2CAP_FCS_NONE
||
3301 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3302 chan
->fcs
= L2CAP_FCS_NONE
;
3303 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3309 req
->dcid
= cpu_to_le16(chan
->dcid
);
3310 req
->flags
= cpu_to_le16(0);
/* Parse the accumulated Configuration Request in chan->conf_req and
 * build the Configuration Response into the caller's buffer. Validates
 * the requested mode, MTU and (optionally) EFS parameters, records the
 * remote's ERTM/streaming settings on the channel, and returns the
 * response length, or -ECONNREFUSED when negotiation cannot succeed.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires A2MP support locally */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints may be ignored; unknown options
			 * are echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND,
					&chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3529 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3530 void *data
, u16
*result
)
3532 struct l2cap_conf_req
*req
= data
;
3533 void *ptr
= req
->data
;
3536 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3537 struct l2cap_conf_efs efs
;
3539 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3541 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3542 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3545 case L2CAP_CONF_MTU
:
3546 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3547 *result
= L2CAP_CONF_UNACCEPT
;
3548 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3551 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3554 case L2CAP_CONF_FLUSH_TO
:
3555 chan
->flush_to
= val
;
3556 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3560 case L2CAP_CONF_RFC
:
3561 if (olen
== sizeof(rfc
))
3562 memcpy(&rfc
, (void *)val
, olen
);
3564 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3565 rfc
.mode
!= chan
->mode
)
3566 return -ECONNREFUSED
;
3570 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3571 sizeof(rfc
), (unsigned long) &rfc
);
3574 case L2CAP_CONF_EWS
:
3575 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3576 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3580 case L2CAP_CONF_EFS
:
3581 if (olen
== sizeof(efs
))
3582 memcpy(&efs
, (void *)val
, olen
);
3584 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3585 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3586 efs
.stype
!= chan
->local_stype
)
3587 return -ECONNREFUSED
;
3589 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3590 (unsigned long) &efs
);
3593 case L2CAP_CONF_FCS
:
3594 if (*result
== L2CAP_CONF_PENDING
)
3595 if (val
== L2CAP_FCS_NONE
)
3596 set_bit(CONF_RECV_NO_FCS
,
3602 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3603 return -ECONNREFUSED
;
3605 chan
->mode
= rfc
.mode
;
3607 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3609 case L2CAP_MODE_ERTM
:
3610 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3611 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3612 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3613 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3614 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3617 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3618 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3619 chan
->local_sdu_itime
=
3620 le32_to_cpu(efs
.sdu_itime
);
3621 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3622 chan
->local_flush_to
=
3623 le32_to_cpu(efs
.flush_to
);
3627 case L2CAP_MODE_STREAMING
:
3628 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3632 req
->dcid
= cpu_to_le16(chan
->dcid
);
3633 req
->flags
= cpu_to_le16(0);
3638 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3639 u16 result
, u16 flags
)
3641 struct l2cap_conf_rsp
*rsp
= data
;
3642 void *ptr
= rsp
->data
;
3644 BT_DBG("chan %p", chan
);
3646 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3647 rsp
->result
= cpu_to_le16(result
);
3648 rsp
->flags
= cpu_to_le16(flags
);
3653 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3655 struct l2cap_le_conn_rsp rsp
;
3656 struct l2cap_conn
*conn
= chan
->conn
;
3658 BT_DBG("chan %p", chan
);
3660 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3661 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3662 rsp
.mps
= cpu_to_le16(chan
->mps
);
3663 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3664 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3666 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3670 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3672 struct l2cap_conn_rsp rsp
;
3673 struct l2cap_conn
*conn
= chan
->conn
;
3677 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3678 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3679 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3680 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3683 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3685 rsp_code
= L2CAP_CONN_RSP
;
3687 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3689 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3691 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3694 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3695 l2cap_build_conf_req(chan
, buf
), buf
);
3696 chan
->num_conf_req
++;
3699 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3703 /* Use sane default values in case a misbehaving remote device
3704 * did not send an RFC or extended window size option.
3706 u16 txwin_ext
= chan
->ack_win
;
3707 struct l2cap_conf_rfc rfc
= {
3709 .retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3710 .monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3711 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3712 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3715 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3717 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3720 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3721 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3724 case L2CAP_CONF_RFC
:
3725 if (olen
== sizeof(rfc
))
3726 memcpy(&rfc
, (void *)val
, olen
);
3728 case L2CAP_CONF_EWS
:
3735 case L2CAP_MODE_ERTM
:
3736 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3737 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3738 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3739 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3740 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3742 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3745 case L2CAP_MODE_STREAMING
:
3746 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3750 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3751 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3754 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3756 if (cmd_len
< sizeof(*rej
))
3759 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3762 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3763 cmd
->ident
== conn
->info_ident
) {
3764 cancel_delayed_work(&conn
->info_timer
);
3766 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3767 conn
->info_ident
= 0;
3769 l2cap_conn_start(conn
);
3775 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3776 struct l2cap_cmd_hdr
*cmd
,
3777 u8
*data
, u8 rsp_code
, u8 amp_id
)
3779 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3780 struct l2cap_conn_rsp rsp
;
3781 struct l2cap_chan
*chan
= NULL
, *pchan
;
3782 int result
, status
= L2CAP_CS_NO_INFO
;
3784 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3785 __le16 psm
= req
->psm
;
3787 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3789 /* Check if we have socket listening on psm */
3790 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3791 &conn
->hcon
->dst
, ACL_LINK
);
3793 result
= L2CAP_CR_BAD_PSM
;
3797 mutex_lock(&conn
->chan_lock
);
3798 l2cap_chan_lock(pchan
);
3800 /* Check if the ACL is secure enough (if not SDP) */
3801 if (psm
!= cpu_to_le16(L2CAP_PSM_SDP
) &&
3802 !hci_conn_check_link_mode(conn
->hcon
)) {
3803 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3804 result
= L2CAP_CR_SEC_BLOCK
;
3808 result
= L2CAP_CR_NO_MEM
;
3810 /* Check if we already have channel with that dcid */
3811 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3814 chan
= pchan
->ops
->new_connection(pchan
);
3818 /* For certain devices (ex: HID mouse), support for authentication,
3819 * pairing and bonding is optional. For such devices, inorder to avoid
3820 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3821 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3823 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3825 bacpy(&chan
->src
, &conn
->hcon
->src
);
3826 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3827 chan
->src_type
= bdaddr_src_type(conn
->hcon
);
3828 chan
->dst_type
= bdaddr_dst_type(conn
->hcon
);
3831 chan
->local_amp_id
= amp_id
;
3833 __l2cap_chan_add(conn
, chan
);
3837 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3839 chan
->ident
= cmd
->ident
;
3841 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3842 if (l2cap_chan_check_security(chan
, false)) {
3843 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3844 l2cap_state_change(chan
, BT_CONNECT2
);
3845 result
= L2CAP_CR_PEND
;
3846 status
= L2CAP_CS_AUTHOR_PEND
;
3847 chan
->ops
->defer(chan
);
3849 /* Force pending result for AMP controllers.
3850 * The connection will succeed after the
3851 * physical link is up.
3853 if (amp_id
== AMP_ID_BREDR
) {
3854 l2cap_state_change(chan
, BT_CONFIG
);
3855 result
= L2CAP_CR_SUCCESS
;
3857 l2cap_state_change(chan
, BT_CONNECT2
);
3858 result
= L2CAP_CR_PEND
;
3860 status
= L2CAP_CS_NO_INFO
;
3863 l2cap_state_change(chan
, BT_CONNECT2
);
3864 result
= L2CAP_CR_PEND
;
3865 status
= L2CAP_CS_AUTHEN_PEND
;
3868 l2cap_state_change(chan
, BT_CONNECT2
);
3869 result
= L2CAP_CR_PEND
;
3870 status
= L2CAP_CS_NO_INFO
;
3874 l2cap_chan_unlock(pchan
);
3875 mutex_unlock(&conn
->chan_lock
);
3876 l2cap_chan_put(pchan
);
3879 rsp
.scid
= cpu_to_le16(scid
);
3880 rsp
.dcid
= cpu_to_le16(dcid
);
3881 rsp
.result
= cpu_to_le16(result
);
3882 rsp
.status
= cpu_to_le16(status
);
3883 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3885 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3886 struct l2cap_info_req info
;
3887 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3889 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3890 conn
->info_ident
= l2cap_get_ident(conn
);
3892 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3894 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3895 sizeof(info
), &info
);
3898 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3899 result
== L2CAP_CR_SUCCESS
) {
3901 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3902 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3903 l2cap_build_conf_req(chan
, buf
), buf
);
3904 chan
->num_conf_req
++;
3910 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3911 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3913 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3914 struct hci_conn
*hcon
= conn
->hcon
;
3916 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3920 if (hci_dev_test_flag(hdev
, HCI_MGMT
) &&
3921 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3922 mgmt_device_connected(hdev
, hcon
, 0, NULL
, 0);
3923 hci_dev_unlock(hdev
);
3925 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3929 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3930 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3933 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3934 u16 scid
, dcid
, result
, status
;
3935 struct l2cap_chan
*chan
;
3939 if (cmd_len
< sizeof(*rsp
))
3942 scid
= __le16_to_cpu(rsp
->scid
);
3943 dcid
= __le16_to_cpu(rsp
->dcid
);
3944 result
= __le16_to_cpu(rsp
->result
);
3945 status
= __le16_to_cpu(rsp
->status
);
3947 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3948 dcid
, scid
, result
, status
);
3950 mutex_lock(&conn
->chan_lock
);
3953 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3959 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3968 l2cap_chan_lock(chan
);
3971 case L2CAP_CR_SUCCESS
:
3972 l2cap_state_change(chan
, BT_CONFIG
);
3975 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3977 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3980 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3981 l2cap_build_conf_req(chan
, req
), req
);
3982 chan
->num_conf_req
++;
3986 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3990 l2cap_chan_del(chan
, ECONNREFUSED
);
3994 l2cap_chan_unlock(chan
);
3997 mutex_unlock(&conn
->chan_lock
);
4002 static inline void set_default_fcs(struct l2cap_chan
*chan
)
4004 /* FCS is enabled only in ERTM or streaming mode, if one or both
4007 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
4008 chan
->fcs
= L2CAP_FCS_NONE
;
4009 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
4010 chan
->fcs
= L2CAP_FCS_CRC16
;
4013 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
4014 u8 ident
, u16 flags
)
4016 struct l2cap_conn
*conn
= chan
->conn
;
4018 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
4021 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
4022 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4024 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4025 l2cap_build_conf_rsp(chan
, data
,
4026 L2CAP_CONF_SUCCESS
, flags
), data
);
4029 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
4032 struct l2cap_cmd_rej_cid rej
;
4034 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4035 rej
.scid
= __cpu_to_le16(scid
);
4036 rej
.dcid
= __cpu_to_le16(dcid
);
4038 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4041 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4042 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4045 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4048 struct l2cap_chan
*chan
;
4051 if (cmd_len
< sizeof(*req
))
4054 dcid
= __le16_to_cpu(req
->dcid
);
4055 flags
= __le16_to_cpu(req
->flags
);
4057 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4059 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4061 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4065 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4066 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4071 /* Reject if config buffer is too small. */
4072 len
= cmd_len
- sizeof(*req
);
4073 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4074 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4075 l2cap_build_conf_rsp(chan
, rsp
,
4076 L2CAP_CONF_REJECT
, flags
), rsp
);
4081 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4082 chan
->conf_len
+= len
;
4084 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4085 /* Incomplete config. Send empty response. */
4086 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4087 l2cap_build_conf_rsp(chan
, rsp
,
4088 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4092 /* Complete config. */
4093 len
= l2cap_parse_conf_req(chan
, rsp
);
4095 l2cap_send_disconn_req(chan
, ECONNRESET
);
4099 chan
->ident
= cmd
->ident
;
4100 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4101 chan
->num_conf_rsp
++;
4103 /* Reset config buffer. */
4106 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4109 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4110 set_default_fcs(chan
);
4112 if (chan
->mode
== L2CAP_MODE_ERTM
||
4113 chan
->mode
== L2CAP_MODE_STREAMING
)
4114 err
= l2cap_ertm_init(chan
);
4117 l2cap_send_disconn_req(chan
, -err
);
4119 l2cap_chan_ready(chan
);
4124 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4126 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4127 l2cap_build_conf_req(chan
, buf
), buf
);
4128 chan
->num_conf_req
++;
4131 /* Got Conf Rsp PENDING from remote side and assume we sent
4132 Conf Rsp PENDING in the code above */
4133 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4134 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4136 /* check compatibility */
4138 /* Send rsp for BR/EDR channel */
4140 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4142 chan
->ident
= cmd
->ident
;
4146 l2cap_chan_unlock(chan
);
4150 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4151 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4154 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4155 u16 scid
, flags
, result
;
4156 struct l2cap_chan
*chan
;
4157 int len
= cmd_len
- sizeof(*rsp
);
4160 if (cmd_len
< sizeof(*rsp
))
4163 scid
= __le16_to_cpu(rsp
->scid
);
4164 flags
= __le16_to_cpu(rsp
->flags
);
4165 result
= __le16_to_cpu(rsp
->result
);
4167 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4170 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4175 case L2CAP_CONF_SUCCESS
:
4176 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4177 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4180 case L2CAP_CONF_PENDING
:
4181 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4183 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4186 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4189 l2cap_send_disconn_req(chan
, ECONNRESET
);
4193 if (!chan
->hs_hcon
) {
4194 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4197 if (l2cap_check_efs(chan
)) {
4198 amp_create_logical_link(chan
);
4199 chan
->ident
= cmd
->ident
;
4205 case L2CAP_CONF_UNACCEPT
:
4206 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4209 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4210 l2cap_send_disconn_req(chan
, ECONNRESET
);
4214 /* throw out any old stored conf requests */
4215 result
= L2CAP_CONF_SUCCESS
;
4216 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4219 l2cap_send_disconn_req(chan
, ECONNRESET
);
4223 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4224 L2CAP_CONF_REQ
, len
, req
);
4225 chan
->num_conf_req
++;
4226 if (result
!= L2CAP_CONF_SUCCESS
)
4232 l2cap_chan_set_err(chan
, ECONNRESET
);
4234 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4235 l2cap_send_disconn_req(chan
, ECONNRESET
);
4239 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4242 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4244 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4245 set_default_fcs(chan
);
4247 if (chan
->mode
== L2CAP_MODE_ERTM
||
4248 chan
->mode
== L2CAP_MODE_STREAMING
)
4249 err
= l2cap_ertm_init(chan
);
4252 l2cap_send_disconn_req(chan
, -err
);
4254 l2cap_chan_ready(chan
);
4258 l2cap_chan_unlock(chan
);
4262 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4263 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4266 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4267 struct l2cap_disconn_rsp rsp
;
4269 struct l2cap_chan
*chan
;
4271 if (cmd_len
!= sizeof(*req
))
4274 scid
= __le16_to_cpu(req
->scid
);
4275 dcid
= __le16_to_cpu(req
->dcid
);
4277 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4279 mutex_lock(&conn
->chan_lock
);
4281 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4283 mutex_unlock(&conn
->chan_lock
);
4284 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4288 l2cap_chan_lock(chan
);
4290 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4291 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4292 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4294 chan
->ops
->set_shutdown(chan
);
4296 l2cap_chan_hold(chan
);
4297 l2cap_chan_del(chan
, ECONNRESET
);
4299 l2cap_chan_unlock(chan
);
4301 chan
->ops
->close(chan
);
4302 l2cap_chan_put(chan
);
4304 mutex_unlock(&conn
->chan_lock
);
4309 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4310 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4313 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4315 struct l2cap_chan
*chan
;
4317 if (cmd_len
!= sizeof(*rsp
))
4320 scid
= __le16_to_cpu(rsp
->scid
);
4321 dcid
= __le16_to_cpu(rsp
->dcid
);
4323 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4325 mutex_lock(&conn
->chan_lock
);
4327 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4329 mutex_unlock(&conn
->chan_lock
);
4333 l2cap_chan_lock(chan
);
4335 l2cap_chan_hold(chan
);
4336 l2cap_chan_del(chan
, 0);
4338 l2cap_chan_unlock(chan
);
4340 chan
->ops
->close(chan
);
4341 l2cap_chan_put(chan
);
4343 mutex_unlock(&conn
->chan_lock
);
4348 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4349 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4352 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4355 if (cmd_len
!= sizeof(*req
))
4358 type
= __le16_to_cpu(req
->type
);
4360 BT_DBG("type 0x%4.4x", type
);
4362 if (type
== L2CAP_IT_FEAT_MASK
) {
4364 u32 feat_mask
= l2cap_feat_mask
;
4365 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4366 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4367 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4369 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4371 if (conn
->local_fixed_chan
& L2CAP_FC_A2MP
)
4372 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4373 | L2CAP_FEAT_EXT_WINDOW
;
4375 put_unaligned_le32(feat_mask
, rsp
->data
);
4376 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4378 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4380 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4382 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4383 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4384 rsp
->data
[0] = conn
->local_fixed_chan
;
4385 memset(rsp
->data
+ 1, 0, 7);
4386 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4389 struct l2cap_info_rsp rsp
;
4390 rsp
.type
= cpu_to_le16(type
);
4391 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4392 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4399 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4400 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4403 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4406 if (cmd_len
< sizeof(*rsp
))
4409 type
= __le16_to_cpu(rsp
->type
);
4410 result
= __le16_to_cpu(rsp
->result
);
4412 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4414 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4415 if (cmd
->ident
!= conn
->info_ident
||
4416 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4419 cancel_delayed_work(&conn
->info_timer
);
4421 if (result
!= L2CAP_IR_SUCCESS
) {
4422 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4423 conn
->info_ident
= 0;
4425 l2cap_conn_start(conn
);
4431 case L2CAP_IT_FEAT_MASK
:
4432 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4434 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4435 struct l2cap_info_req req
;
4436 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4438 conn
->info_ident
= l2cap_get_ident(conn
);
4440 l2cap_send_cmd(conn
, conn
->info_ident
,
4441 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4443 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4444 conn
->info_ident
= 0;
4446 l2cap_conn_start(conn
);
4450 case L2CAP_IT_FIXED_CHAN
:
4451 conn
->remote_fixed_chan
= rsp
->data
[0];
4452 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4453 conn
->info_ident
= 0;
4455 l2cap_conn_start(conn
);
4462 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4463 struct l2cap_cmd_hdr
*cmd
,
4464 u16 cmd_len
, void *data
)
4466 struct l2cap_create_chan_req
*req
= data
;
4467 struct l2cap_create_chan_rsp rsp
;
4468 struct l2cap_chan
*chan
;
4469 struct hci_dev
*hdev
;
4472 if (cmd_len
!= sizeof(*req
))
4475 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
4478 psm
= le16_to_cpu(req
->psm
);
4479 scid
= le16_to_cpu(req
->scid
);
4481 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4483 /* For controller id 0 make BR/EDR connection */
4484 if (req
->amp_id
== AMP_ID_BREDR
) {
4485 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4490 /* Validate AMP controller id */
4491 hdev
= hci_dev_get(req
->amp_id
);
4495 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4500 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4503 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4504 struct hci_conn
*hs_hcon
;
4506 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4510 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4515 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4517 mgr
->bredr_chan
= chan
;
4518 chan
->hs_hcon
= hs_hcon
;
4519 chan
->fcs
= L2CAP_FCS_NONE
;
4520 conn
->mtu
= hdev
->block_mtu
;
4529 rsp
.scid
= cpu_to_le16(scid
);
4530 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4531 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4533 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4539 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4541 struct l2cap_move_chan_req req
;
4544 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4546 ident
= l2cap_get_ident(chan
->conn
);
4547 chan
->ident
= ident
;
4549 req
.icid
= cpu_to_le16(chan
->scid
);
4550 req
.dest_amp_id
= dest_amp_id
;
4552 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4555 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4558 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4560 struct l2cap_move_chan_rsp rsp
;
4562 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4564 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4565 rsp
.result
= cpu_to_le16(result
);
4567 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4571 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4573 struct l2cap_move_chan_cfm cfm
;
4575 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4577 chan
->ident
= l2cap_get_ident(chan
->conn
);
4579 cfm
.icid
= cpu_to_le16(chan
->scid
);
4580 cfm
.result
= cpu_to_le16(result
);
4582 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4585 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4588 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4590 struct l2cap_move_chan_cfm cfm
;
4592 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4594 cfm
.icid
= cpu_to_le16(icid
);
4595 cfm
.result
= cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4597 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4601 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4604 struct l2cap_move_chan_cfm_rsp rsp
;
4606 BT_DBG("icid 0x%4.4x", icid
);
4608 rsp
.icid
= cpu_to_le16(icid
);
4609 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4612 static void __release_logical_link(struct l2cap_chan
*chan
)
4614 chan
->hs_hchan
= NULL
;
4615 chan
->hs_hcon
= NULL
;
4617 /* Placeholder - release the logical link */
4620 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4622 /* Logical link setup failed */
4623 if (chan
->state
!= BT_CONNECTED
) {
4624 /* Create channel failure, disconnect */
4625 l2cap_send_disconn_req(chan
, ECONNRESET
);
4629 switch (chan
->move_role
) {
4630 case L2CAP_MOVE_ROLE_RESPONDER
:
4631 l2cap_move_done(chan
);
4632 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4634 case L2CAP_MOVE_ROLE_INITIATOR
:
4635 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4636 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4637 /* Remote has only sent pending or
4638 * success responses, clean up
4640 l2cap_move_done(chan
);
4643 /* Other amp move states imply that the move
4644 * has already aborted
4646 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4651 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4652 struct hci_chan
*hchan
)
4654 struct l2cap_conf_rsp rsp
;
4656 chan
->hs_hchan
= hchan
;
4657 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4659 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4661 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4664 set_default_fcs(chan
);
4666 err
= l2cap_ertm_init(chan
);
4668 l2cap_send_disconn_req(chan
, -err
);
4670 l2cap_chan_ready(chan
);
4674 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4675 struct hci_chan
*hchan
)
4677 chan
->hs_hcon
= hchan
->conn
;
4678 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4680 BT_DBG("move_state %d", chan
->move_state
);
4682 switch (chan
->move_state
) {
4683 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4684 /* Move confirm will be sent after a success
4685 * response is received
4687 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4689 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4690 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4691 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4692 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4693 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4694 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4695 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4696 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4697 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4701 /* Move was not in expected state, free the channel */
4702 __release_logical_link(chan
);
4704 chan
->move_state
= L2CAP_MOVE_STABLE
;
4708 /* Call with chan locked */
4709 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4712 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4715 l2cap_logical_fail(chan
);
4716 __release_logical_link(chan
);
4720 if (chan
->state
!= BT_CONNECTED
) {
4721 /* Ignore logical link if channel is on BR/EDR */
4722 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4723 l2cap_logical_finish_create(chan
, hchan
);
4725 l2cap_logical_finish_move(chan
, hchan
);
4729 void l2cap_move_start(struct l2cap_chan
*chan
)
4731 BT_DBG("chan %p", chan
);
4733 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4734 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4736 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4737 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4738 /* Placeholder - start physical link setup */
4740 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4741 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4743 l2cap_move_setup(chan
);
4744 l2cap_send_move_chan_req(chan
, 0);
4748 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4749 u8 local_amp_id
, u8 remote_amp_id
)
4751 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4752 local_amp_id
, remote_amp_id
);
4754 chan
->fcs
= L2CAP_FCS_NONE
;
4756 /* Outgoing channel on AMP */
4757 if (chan
->state
== BT_CONNECT
) {
4758 if (result
== L2CAP_CR_SUCCESS
) {
4759 chan
->local_amp_id
= local_amp_id
;
4760 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4762 /* Revert to BR/EDR connect */
4763 l2cap_send_conn_req(chan
);
4769 /* Incoming channel on AMP */
4770 if (__l2cap_no_conn_pending(chan
)) {
4771 struct l2cap_conn_rsp rsp
;
4773 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4774 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4776 if (result
== L2CAP_CR_SUCCESS
) {
4777 /* Send successful response */
4778 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4779 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4781 /* Send negative response */
4782 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4783 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4786 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4789 if (result
== L2CAP_CR_SUCCESS
) {
4790 l2cap_state_change(chan
, BT_CONFIG
);
4791 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4792 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4794 l2cap_build_conf_req(chan
, buf
), buf
);
4795 chan
->num_conf_req
++;
4800 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4803 l2cap_move_setup(chan
);
4804 chan
->move_id
= local_amp_id
;
4805 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4807 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4810 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4812 struct hci_chan
*hchan
= NULL
;
4814 /* Placeholder - get hci_chan for logical link */
4817 if (hchan
->state
== BT_CONNECTED
) {
4818 /* Logical link is ready to go */
4819 chan
->hs_hcon
= hchan
->conn
;
4820 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4821 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4822 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4824 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4826 /* Wait for logical link to be ready */
4827 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4830 /* Logical link not available */
4831 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4835 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4837 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4839 if (result
== -EINVAL
)
4840 rsp_result
= L2CAP_MR_BAD_ID
;
4842 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4844 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4847 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4848 chan
->move_state
= L2CAP_MOVE_STABLE
;
4850 /* Restart data transmission */
4851 l2cap_ertm_send(chan
);
4854 /* Invoke with locked chan */
4855 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4857 u8 local_amp_id
= chan
->local_amp_id
;
4858 u8 remote_amp_id
= chan
->remote_amp_id
;
4860 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4861 chan
, result
, local_amp_id
, remote_amp_id
);
4863 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4864 l2cap_chan_unlock(chan
);
4868 if (chan
->state
!= BT_CONNECTED
) {
4869 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4870 } else if (result
!= L2CAP_MR_SUCCESS
) {
4871 l2cap_do_move_cancel(chan
, result
);
4873 switch (chan
->move_role
) {
4874 case L2CAP_MOVE_ROLE_INITIATOR
:
4875 l2cap_do_move_initiate(chan
, local_amp_id
,
4878 case L2CAP_MOVE_ROLE_RESPONDER
:
4879 l2cap_do_move_respond(chan
, result
);
4882 l2cap_do_move_cancel(chan
, result
);
4888 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4889 struct l2cap_cmd_hdr
*cmd
,
4890 u16 cmd_len
, void *data
)
4892 struct l2cap_move_chan_req
*req
= data
;
4893 struct l2cap_move_chan_rsp rsp
;
4894 struct l2cap_chan
*chan
;
4896 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4898 if (cmd_len
!= sizeof(*req
))
4901 icid
= le16_to_cpu(req
->icid
);
4903 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4905 if (!(conn
->local_fixed_chan
& L2CAP_FC_A2MP
))
4908 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4910 rsp
.icid
= cpu_to_le16(icid
);
4911 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4912 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4917 chan
->ident
= cmd
->ident
;
4919 if (chan
->scid
< L2CAP_CID_DYN_START
||
4920 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4921 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4922 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4923 result
= L2CAP_MR_NOT_ALLOWED
;
4924 goto send_move_response
;
4927 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4928 result
= L2CAP_MR_SAME_ID
;
4929 goto send_move_response
;
4932 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4933 struct hci_dev
*hdev
;
4934 hdev
= hci_dev_get(req
->dest_amp_id
);
4935 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4936 !test_bit(HCI_UP
, &hdev
->flags
)) {
4940 result
= L2CAP_MR_BAD_ID
;
4941 goto send_move_response
;
4946 /* Detect a move collision. Only send a collision response
4947 * if this side has "lost", otherwise proceed with the move.
4948 * The winner has the larger bd_addr.
4950 if ((__chan_is_moving(chan
) ||
4951 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4952 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4953 result
= L2CAP_MR_COLLISION
;
4954 goto send_move_response
;
4957 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4958 l2cap_move_setup(chan
);
4959 chan
->move_id
= req
->dest_amp_id
;
4962 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4963 /* Moving to BR/EDR */
4964 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4965 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4966 result
= L2CAP_MR_PEND
;
4968 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4969 result
= L2CAP_MR_SUCCESS
;
4972 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4973 /* Placeholder - uncomment when amp functions are available */
4974 /*amp_accept_physical(chan, req->dest_amp_id);*/
4975 result
= L2CAP_MR_PEND
;
4979 l2cap_send_move_chan_rsp(chan
, result
);
4981 l2cap_chan_unlock(chan
);
4986 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4988 struct l2cap_chan
*chan
;
4989 struct hci_chan
*hchan
= NULL
;
4991 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4993 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4997 __clear_chan_timer(chan
);
4998 if (result
== L2CAP_MR_PEND
)
4999 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
5001 switch (chan
->move_state
) {
5002 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
5003 /* Move confirm will be sent when logical link
5006 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5008 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
5009 if (result
== L2CAP_MR_PEND
) {
5011 } else if (test_bit(CONN_LOCAL_BUSY
,
5012 &chan
->conn_state
)) {
5013 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5015 /* Logical link is up or moving to BR/EDR,
5018 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
5019 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5022 case L2CAP_MOVE_WAIT_RSP
:
5024 if (result
== L2CAP_MR_SUCCESS
) {
5025 /* Remote is ready, send confirm immediately
5026 * after logical link is ready
5028 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5030 /* Both logical link and move success
5031 * are required to confirm
5033 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5036 /* Placeholder - get hci_chan for logical link */
5038 /* Logical link not available */
5039 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5043 /* If the logical link is not yet connected, do not
5044 * send confirmation.
5046 if (hchan
->state
!= BT_CONNECTED
)
5049 /* Logical link is already ready to go */
5051 chan
->hs_hcon
= hchan
->conn
;
5052 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5054 if (result
== L2CAP_MR_SUCCESS
) {
5055 /* Can confirm now */
5056 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5058 /* Now only need move success
5061 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5064 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5067 /* Any other amp move state means the move failed. */
5068 chan
->move_id
= chan
->local_amp_id
;
5069 l2cap_move_done(chan
);
5070 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5073 l2cap_chan_unlock(chan
);
5076 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5079 struct l2cap_chan
*chan
;
5081 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5083 /* Could not locate channel, icid is best guess */
5084 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5088 __clear_chan_timer(chan
);
5090 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5091 if (result
== L2CAP_MR_COLLISION
) {
5092 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5094 /* Cleanup - cancel move */
5095 chan
->move_id
= chan
->local_amp_id
;
5096 l2cap_move_done(chan
);
5100 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5102 l2cap_chan_unlock(chan
);
5105 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5106 struct l2cap_cmd_hdr
*cmd
,
5107 u16 cmd_len
, void *data
)
5109 struct l2cap_move_chan_rsp
*rsp
= data
;
5112 if (cmd_len
!= sizeof(*rsp
))
5115 icid
= le16_to_cpu(rsp
->icid
);
5116 result
= le16_to_cpu(rsp
->result
);
5118 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5120 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5121 l2cap_move_continue(conn
, icid
, result
);
5123 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5128 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5129 struct l2cap_cmd_hdr
*cmd
,
5130 u16 cmd_len
, void *data
)
5132 struct l2cap_move_chan_cfm
*cfm
= data
;
5133 struct l2cap_chan
*chan
;
5136 if (cmd_len
!= sizeof(*cfm
))
5139 icid
= le16_to_cpu(cfm
->icid
);
5140 result
= le16_to_cpu(cfm
->result
);
5142 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5144 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5146 /* Spec requires a response even if the icid was not found */
5147 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5151 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5152 if (result
== L2CAP_MC_CONFIRMED
) {
5153 chan
->local_amp_id
= chan
->move_id
;
5154 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5155 __release_logical_link(chan
);
5157 chan
->move_id
= chan
->local_amp_id
;
5160 l2cap_move_done(chan
);
5163 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5165 l2cap_chan_unlock(chan
);
5170 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5171 struct l2cap_cmd_hdr
*cmd
,
5172 u16 cmd_len
, void *data
)
5174 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5175 struct l2cap_chan
*chan
;
5178 if (cmd_len
!= sizeof(*rsp
))
5181 icid
= le16_to_cpu(rsp
->icid
);
5183 BT_DBG("icid 0x%4.4x", icid
);
5185 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5189 __clear_chan_timer(chan
);
5191 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5192 chan
->local_amp_id
= chan
->move_id
;
5194 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5195 __release_logical_link(chan
);
5197 l2cap_move_done(chan
);
5200 l2cap_chan_unlock(chan
);
5205 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5206 struct l2cap_cmd_hdr
*cmd
,
5207 u16 cmd_len
, u8
*data
)
5209 struct hci_conn
*hcon
= conn
->hcon
;
5210 struct l2cap_conn_param_update_req
*req
;
5211 struct l2cap_conn_param_update_rsp rsp
;
5212 u16 min
, max
, latency
, to_multiplier
;
5215 if (hcon
->role
!= HCI_ROLE_MASTER
)
5218 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5221 req
= (struct l2cap_conn_param_update_req
*) data
;
5222 min
= __le16_to_cpu(req
->min
);
5223 max
= __le16_to_cpu(req
->max
);
5224 latency
= __le16_to_cpu(req
->latency
);
5225 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5227 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5228 min
, max
, latency
, to_multiplier
);
5230 memset(&rsp
, 0, sizeof(rsp
));
5232 err
= hci_check_conn_params(min
, max
, latency
, to_multiplier
);
5234 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5236 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5238 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5244 store_hint
= hci_le_conn_update(hcon
, min
, max
, latency
,
5246 mgmt_new_conn_param(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
,
5247 store_hint
, min
, max
, latency
,
5255 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5256 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5259 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5260 struct hci_conn
*hcon
= conn
->hcon
;
5261 u16 dcid
, mtu
, mps
, credits
, result
;
5262 struct l2cap_chan
*chan
;
5265 if (cmd_len
< sizeof(*rsp
))
5268 dcid
= __le16_to_cpu(rsp
->dcid
);
5269 mtu
= __le16_to_cpu(rsp
->mtu
);
5270 mps
= __le16_to_cpu(rsp
->mps
);
5271 credits
= __le16_to_cpu(rsp
->credits
);
5272 result
= __le16_to_cpu(rsp
->result
);
5274 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23 ||
5275 dcid
< L2CAP_CID_DYN_START
||
5276 dcid
> L2CAP_CID_LE_DYN_END
))
5279 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5280 dcid
, mtu
, mps
, credits
, result
);
5282 mutex_lock(&conn
->chan_lock
);
5284 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5292 l2cap_chan_lock(chan
);
5295 case L2CAP_CR_SUCCESS
:
5296 if (__l2cap_get_chan_by_dcid(conn
, dcid
)) {
5304 chan
->remote_mps
= mps
;
5305 chan
->tx_credits
= credits
;
5306 l2cap_chan_ready(chan
);
5309 case L2CAP_CR_AUTHENTICATION
:
5310 case L2CAP_CR_ENCRYPTION
:
5311 /* If we already have MITM protection we can't do
5314 if (hcon
->sec_level
> BT_SECURITY_MEDIUM
) {
5315 l2cap_chan_del(chan
, ECONNREFUSED
);
5319 sec_level
= hcon
->sec_level
+ 1;
5320 if (chan
->sec_level
< sec_level
)
5321 chan
->sec_level
= sec_level
;
5323 /* We'll need to send a new Connect Request */
5324 clear_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
);
5326 smp_conn_security(hcon
, chan
->sec_level
);
5330 l2cap_chan_del(chan
, ECONNREFUSED
);
5334 l2cap_chan_unlock(chan
);
5337 mutex_unlock(&conn
->chan_lock
);
5342 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5343 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5348 switch (cmd
->code
) {
5349 case L2CAP_COMMAND_REJ
:
5350 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5353 case L2CAP_CONN_REQ
:
5354 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5357 case L2CAP_CONN_RSP
:
5358 case L2CAP_CREATE_CHAN_RSP
:
5359 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5362 case L2CAP_CONF_REQ
:
5363 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5366 case L2CAP_CONF_RSP
:
5367 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5370 case L2CAP_DISCONN_REQ
:
5371 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5374 case L2CAP_DISCONN_RSP
:
5375 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5378 case L2CAP_ECHO_REQ
:
5379 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5382 case L2CAP_ECHO_RSP
:
5385 case L2CAP_INFO_REQ
:
5386 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5389 case L2CAP_INFO_RSP
:
5390 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5393 case L2CAP_CREATE_CHAN_REQ
:
5394 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5397 case L2CAP_MOVE_CHAN_REQ
:
5398 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5401 case L2CAP_MOVE_CHAN_RSP
:
5402 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5405 case L2CAP_MOVE_CHAN_CFM
:
5406 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5409 case L2CAP_MOVE_CHAN_CFM_RSP
:
5410 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5414 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5422 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5423 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5426 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5427 struct l2cap_le_conn_rsp rsp
;
5428 struct l2cap_chan
*chan
, *pchan
;
5429 u16 dcid
, scid
, credits
, mtu
, mps
;
5433 if (cmd_len
!= sizeof(*req
))
5436 scid
= __le16_to_cpu(req
->scid
);
5437 mtu
= __le16_to_cpu(req
->mtu
);
5438 mps
= __le16_to_cpu(req
->mps
);
5443 if (mtu
< 23 || mps
< 23)
5446 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5449 /* Check if we have socket listening on psm */
5450 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5451 &conn
->hcon
->dst
, LE_LINK
);
5453 result
= L2CAP_CR_BAD_PSM
;
5458 mutex_lock(&conn
->chan_lock
);
5459 l2cap_chan_lock(pchan
);
5461 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
,
5463 result
= L2CAP_CR_AUTHENTICATION
;
5465 goto response_unlock
;
5468 /* Check for valid dynamic CID range */
5469 if (scid
< L2CAP_CID_DYN_START
|| scid
> L2CAP_CID_LE_DYN_END
) {
5470 result
= L2CAP_CR_INVALID_SCID
;
5472 goto response_unlock
;
5475 /* Check if we already have channel with that dcid */
5476 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5477 result
= L2CAP_CR_SCID_IN_USE
;
5479 goto response_unlock
;
5482 chan
= pchan
->ops
->new_connection(pchan
);
5484 result
= L2CAP_CR_NO_MEM
;
5485 goto response_unlock
;
5488 l2cap_le_flowctl_init(chan
);
5490 bacpy(&chan
->src
, &conn
->hcon
->src
);
5491 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5492 chan
->src_type
= bdaddr_src_type(conn
->hcon
);
5493 chan
->dst_type
= bdaddr_dst_type(conn
->hcon
);
5497 chan
->remote_mps
= mps
;
5498 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5500 __l2cap_chan_add(conn
, chan
);
5502 credits
= chan
->rx_credits
;
5504 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5506 chan
->ident
= cmd
->ident
;
5508 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5509 l2cap_state_change(chan
, BT_CONNECT2
);
5510 /* The following result value is actually not defined
5511 * for LE CoC but we use it to let the function know
5512 * that it should bail out after doing its cleanup
5513 * instead of sending a response.
5515 result
= L2CAP_CR_PEND
;
5516 chan
->ops
->defer(chan
);
5518 l2cap_chan_ready(chan
);
5519 result
= L2CAP_CR_SUCCESS
;
5523 l2cap_chan_unlock(pchan
);
5524 mutex_unlock(&conn
->chan_lock
);
5525 l2cap_chan_put(pchan
);
5527 if (result
== L2CAP_CR_PEND
)
5532 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5533 rsp
.mps
= cpu_to_le16(chan
->mps
);
5539 rsp
.dcid
= cpu_to_le16(dcid
);
5540 rsp
.credits
= cpu_to_le16(credits
);
5541 rsp
.result
= cpu_to_le16(result
);
5543 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5548 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5549 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5552 struct l2cap_le_credits
*pkt
;
5553 struct l2cap_chan
*chan
;
5554 u16 cid
, credits
, max_credits
;
5556 if (cmd_len
!= sizeof(*pkt
))
5559 pkt
= (struct l2cap_le_credits
*) data
;
5560 cid
= __le16_to_cpu(pkt
->cid
);
5561 credits
= __le16_to_cpu(pkt
->credits
);
5563 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5565 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
5569 max_credits
= LE_FLOWCTL_MAX_CREDITS
- chan
->tx_credits
;
5570 if (credits
> max_credits
) {
5571 BT_ERR("LE credits overflow");
5572 l2cap_send_disconn_req(chan
, ECONNRESET
);
5573 l2cap_chan_unlock(chan
);
5575 /* Return 0 so that we don't trigger an unnecessary
5576 * command reject packet.
5581 chan
->tx_credits
+= credits
;
5583 /* Resume sending */
5584 l2cap_le_flowctl_send(chan
);
5586 if (chan
->tx_credits
)
5587 chan
->ops
->resume(chan
);
5589 l2cap_chan_unlock(chan
);
5594 static inline int l2cap_le_command_rej(struct l2cap_conn
*conn
,
5595 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5598 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
5599 struct l2cap_chan
*chan
;
5601 if (cmd_len
< sizeof(*rej
))
5604 mutex_lock(&conn
->chan_lock
);
5606 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5610 l2cap_chan_lock(chan
);
5611 l2cap_chan_del(chan
, ECONNREFUSED
);
5612 l2cap_chan_unlock(chan
);
5615 mutex_unlock(&conn
->chan_lock
);
5619 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5620 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5625 switch (cmd
->code
) {
5626 case L2CAP_COMMAND_REJ
:
5627 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5630 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5631 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5634 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5637 case L2CAP_LE_CONN_RSP
:
5638 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5641 case L2CAP_LE_CONN_REQ
:
5642 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5645 case L2CAP_LE_CREDITS
:
5646 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5649 case L2CAP_DISCONN_REQ
:
5650 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5653 case L2CAP_DISCONN_RSP
:
5654 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5658 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5666 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5667 struct sk_buff
*skb
)
5669 struct hci_conn
*hcon
= conn
->hcon
;
5670 struct l2cap_cmd_hdr
*cmd
;
5674 if (hcon
->type
!= LE_LINK
)
5677 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5680 cmd
= (void *) skb
->data
;
5681 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5683 len
= le16_to_cpu(cmd
->len
);
5685 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5687 if (len
!= skb
->len
|| !cmd
->ident
) {
5688 BT_DBG("corrupted command");
5692 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5694 struct l2cap_cmd_rej_unk rej
;
5696 BT_ERR("Wrong link type (%d)", err
);
5698 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5699 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5707 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5708 struct sk_buff
*skb
)
5710 struct hci_conn
*hcon
= conn
->hcon
;
5711 u8
*data
= skb
->data
;
5713 struct l2cap_cmd_hdr cmd
;
5716 l2cap_raw_recv(conn
, skb
);
5718 if (hcon
->type
!= ACL_LINK
)
5721 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5723 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5724 data
+= L2CAP_CMD_HDR_SIZE
;
5725 len
-= L2CAP_CMD_HDR_SIZE
;
5727 cmd_len
= le16_to_cpu(cmd
.len
);
5729 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5732 if (cmd_len
> len
|| !cmd
.ident
) {
5733 BT_DBG("corrupted command");
5737 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5739 struct l2cap_cmd_rej_unk rej
;
5741 BT_ERR("Wrong link type (%d)", err
);
5743 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5744 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5756 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5758 u16 our_fcs
, rcv_fcs
;
5761 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5762 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5764 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5766 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5767 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5768 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5769 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5771 if (our_fcs
!= rcv_fcs
)
5777 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5779 struct l2cap_ctrl control
;
5781 BT_DBG("chan %p", chan
);
5783 memset(&control
, 0, sizeof(control
));
5786 control
.reqseq
= chan
->buffer_seq
;
5787 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5789 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5790 control
.super
= L2CAP_SUPER_RNR
;
5791 l2cap_send_sframe(chan
, &control
);
5794 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5795 chan
->unacked_frames
> 0)
5796 __set_retrans_timer(chan
);
5798 /* Send pending iframes */
5799 l2cap_ertm_send(chan
);
5801 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5802 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5803 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5806 control
.super
= L2CAP_SUPER_RR
;
5807 l2cap_send_sframe(chan
, &control
);
5811 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5812 struct sk_buff
**last_frag
)
5814 /* skb->len reflects data in skb as well as all fragments
5815 * skb->data_len reflects only data in fragments
5817 if (!skb_has_frag_list(skb
))
5818 skb_shinfo(skb
)->frag_list
= new_frag
;
5820 new_frag
->next
= NULL
;
5822 (*last_frag
)->next
= new_frag
;
5823 *last_frag
= new_frag
;
5825 skb
->len
+= new_frag
->len
;
5826 skb
->data_len
+= new_frag
->len
;
5827 skb
->truesize
+= new_frag
->truesize
;
5830 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5831 struct l2cap_ctrl
*control
)
5835 switch (control
->sar
) {
5836 case L2CAP_SAR_UNSEGMENTED
:
5840 err
= chan
->ops
->recv(chan
, skb
);
5843 case L2CAP_SAR_START
:
5847 if (!pskb_may_pull(skb
, L2CAP_SDULEN_SIZE
))
5850 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5851 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5853 if (chan
->sdu_len
> chan
->imtu
) {
5858 if (skb
->len
>= chan
->sdu_len
)
5862 chan
->sdu_last_frag
= skb
;
5868 case L2CAP_SAR_CONTINUE
:
5872 append_skb_frag(chan
->sdu
, skb
,
5873 &chan
->sdu_last_frag
);
5876 if (chan
->sdu
->len
>= chan
->sdu_len
)
5886 append_skb_frag(chan
->sdu
, skb
,
5887 &chan
->sdu_last_frag
);
5890 if (chan
->sdu
->len
!= chan
->sdu_len
)
5893 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5896 /* Reassembly complete */
5898 chan
->sdu_last_frag
= NULL
;
5906 kfree_skb(chan
->sdu
);
5908 chan
->sdu_last_frag
= NULL
;
5915 static int l2cap_resegment(struct l2cap_chan
*chan
)
5921 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5925 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5928 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5929 l2cap_tx(chan
, NULL
, NULL
, event
);
5932 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5935 /* Pass sequential frames to l2cap_reassemble_sdu()
5936 * until a gap is encountered.
5939 BT_DBG("chan %p", chan
);
5941 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5942 struct sk_buff
*skb
;
5943 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5944 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5946 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5951 skb_unlink(skb
, &chan
->srej_q
);
5952 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5953 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->l2cap
);
5958 if (skb_queue_empty(&chan
->srej_q
)) {
5959 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5960 l2cap_send_ack(chan
);
5966 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5967 struct l2cap_ctrl
*control
)
5969 struct sk_buff
*skb
;
5971 BT_DBG("chan %p, control %p", chan
, control
);
5973 if (control
->reqseq
== chan
->next_tx_seq
) {
5974 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5975 l2cap_send_disconn_req(chan
, ECONNRESET
);
5979 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5982 BT_DBG("Seq %d not available for retransmission",
5987 if (chan
->max_tx
!= 0 && bt_cb(skb
)->l2cap
.retries
>= chan
->max_tx
) {
5988 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5989 l2cap_send_disconn_req(chan
, ECONNRESET
);
5993 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5995 if (control
->poll
) {
5996 l2cap_pass_to_tx(chan
, control
);
5998 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5999 l2cap_retransmit(chan
, control
);
6000 l2cap_ertm_send(chan
);
6002 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
6003 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6004 chan
->srej_save_reqseq
= control
->reqseq
;
6007 l2cap_pass_to_tx_fbit(chan
, control
);
6009 if (control
->final
) {
6010 if (chan
->srej_save_reqseq
!= control
->reqseq
||
6011 !test_and_clear_bit(CONN_SREJ_ACT
,
6013 l2cap_retransmit(chan
, control
);
6015 l2cap_retransmit(chan
, control
);
6016 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
6017 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6018 chan
->srej_save_reqseq
= control
->reqseq
;
6024 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
6025 struct l2cap_ctrl
*control
)
6027 struct sk_buff
*skb
;
6029 BT_DBG("chan %p, control %p", chan
, control
);
6031 if (control
->reqseq
== chan
->next_tx_seq
) {
6032 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
6033 l2cap_send_disconn_req(chan
, ECONNRESET
);
6037 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
6039 if (chan
->max_tx
&& skb
&&
6040 bt_cb(skb
)->l2cap
.retries
>= chan
->max_tx
) {
6041 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
6042 l2cap_send_disconn_req(chan
, ECONNRESET
);
6046 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6048 l2cap_pass_to_tx(chan
, control
);
6050 if (control
->final
) {
6051 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
6052 l2cap_retransmit_all(chan
, control
);
6054 l2cap_retransmit_all(chan
, control
);
6055 l2cap_ertm_send(chan
);
6056 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
6057 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
6061 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
6063 BT_DBG("chan %p, txseq %d", chan
, txseq
);
6065 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
6066 chan
->expected_tx_seq
);
6068 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
6069 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6071 /* See notes below regarding "double poll" and
6074 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6075 BT_DBG("Invalid/Ignore - after SREJ");
6076 return L2CAP_TXSEQ_INVALID_IGNORE
;
6078 BT_DBG("Invalid - in window after SREJ sent");
6079 return L2CAP_TXSEQ_INVALID
;
6083 if (chan
->srej_list
.head
== txseq
) {
6084 BT_DBG("Expected SREJ");
6085 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6088 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6089 BT_DBG("Duplicate SREJ - txseq already stored");
6090 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6093 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6094 BT_DBG("Unexpected SREJ - not requested");
6095 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6099 if (chan
->expected_tx_seq
== txseq
) {
6100 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6102 BT_DBG("Invalid - txseq outside tx window");
6103 return L2CAP_TXSEQ_INVALID
;
6106 return L2CAP_TXSEQ_EXPECTED
;
6110 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6111 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6112 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6113 return L2CAP_TXSEQ_DUPLICATE
;
6116 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6117 /* A source of invalid packets is a "double poll" condition,
6118 * where delays cause us to send multiple poll packets. If
6119 * the remote stack receives and processes both polls,
6120 * sequence numbers can wrap around in such a way that a
6121 * resent frame has a sequence number that looks like new data
6122 * with a sequence gap. This would trigger an erroneous SREJ
6125 * Fortunately, this is impossible with a tx window that's
6126 * less than half of the maximum sequence number, which allows
6127 * invalid frames to be safely ignored.
6129 * With tx window sizes greater than half of the tx window
6130 * maximum, the frame is invalid and cannot be ignored. This
6131 * causes a disconnect.
6134 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6135 BT_DBG("Invalid/Ignore - txseq outside tx window");
6136 return L2CAP_TXSEQ_INVALID_IGNORE
;
6138 BT_DBG("Invalid - txseq outside tx window");
6139 return L2CAP_TXSEQ_INVALID
;
6142 BT_DBG("Unexpected - txseq indicates missing frames");
6143 return L2CAP_TXSEQ_UNEXPECTED
;
6147 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6148 struct l2cap_ctrl
*control
,
6149 struct sk_buff
*skb
, u8 event
)
6152 bool skb_in_use
= false;
6154 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6158 case L2CAP_EV_RECV_IFRAME
:
6159 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6160 case L2CAP_TXSEQ_EXPECTED
:
6161 l2cap_pass_to_tx(chan
, control
);
6163 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6164 BT_DBG("Busy, discarding expected seq %d",
6169 chan
->expected_tx_seq
= __next_seq(chan
,
6172 chan
->buffer_seq
= chan
->expected_tx_seq
;
6175 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6179 if (control
->final
) {
6180 if (!test_and_clear_bit(CONN_REJ_ACT
,
6181 &chan
->conn_state
)) {
6183 l2cap_retransmit_all(chan
, control
);
6184 l2cap_ertm_send(chan
);
6188 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6189 l2cap_send_ack(chan
);
6191 case L2CAP_TXSEQ_UNEXPECTED
:
6192 l2cap_pass_to_tx(chan
, control
);
6194 /* Can't issue SREJ frames in the local busy state.
6195 * Drop this frame, it will be seen as missing
6196 * when local busy is exited.
6198 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6199 BT_DBG("Busy, discarding unexpected seq %d",
6204 /* There was a gap in the sequence, so an SREJ
6205 * must be sent for each missing frame. The
6206 * current frame is stored for later use.
6208 skb_queue_tail(&chan
->srej_q
, skb
);
6210 BT_DBG("Queued %p (queue len %d)", skb
,
6211 skb_queue_len(&chan
->srej_q
));
6213 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6214 l2cap_seq_list_clear(&chan
->srej_list
);
6215 l2cap_send_srej(chan
, control
->txseq
);
6217 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6219 case L2CAP_TXSEQ_DUPLICATE
:
6220 l2cap_pass_to_tx(chan
, control
);
6222 case L2CAP_TXSEQ_INVALID_IGNORE
:
6224 case L2CAP_TXSEQ_INVALID
:
6226 l2cap_send_disconn_req(chan
, ECONNRESET
);
6230 case L2CAP_EV_RECV_RR
:
6231 l2cap_pass_to_tx(chan
, control
);
6232 if (control
->final
) {
6233 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6235 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6236 !__chan_is_moving(chan
)) {
6238 l2cap_retransmit_all(chan
, control
);
6241 l2cap_ertm_send(chan
);
6242 } else if (control
->poll
) {
6243 l2cap_send_i_or_rr_or_rnr(chan
);
6245 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6246 &chan
->conn_state
) &&
6247 chan
->unacked_frames
)
6248 __set_retrans_timer(chan
);
6250 l2cap_ertm_send(chan
);
6253 case L2CAP_EV_RECV_RNR
:
6254 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6255 l2cap_pass_to_tx(chan
, control
);
6256 if (control
&& control
->poll
) {
6257 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6258 l2cap_send_rr_or_rnr(chan
, 0);
6260 __clear_retrans_timer(chan
);
6261 l2cap_seq_list_clear(&chan
->retrans_list
);
6263 case L2CAP_EV_RECV_REJ
:
6264 l2cap_handle_rej(chan
, control
);
6266 case L2CAP_EV_RECV_SREJ
:
6267 l2cap_handle_srej(chan
, control
);
6273 if (skb
&& !skb_in_use
) {
6274 BT_DBG("Freeing %p", skb
);
6281 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6282 struct l2cap_ctrl
*control
,
6283 struct sk_buff
*skb
, u8 event
)
6286 u16 txseq
= control
->txseq
;
6287 bool skb_in_use
= false;
6289 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6293 case L2CAP_EV_RECV_IFRAME
:
6294 switch (l2cap_classify_txseq(chan
, txseq
)) {
6295 case L2CAP_TXSEQ_EXPECTED
:
6296 /* Keep frame for reassembly later */
6297 l2cap_pass_to_tx(chan
, control
);
6298 skb_queue_tail(&chan
->srej_q
, skb
);
6300 BT_DBG("Queued %p (queue len %d)", skb
,
6301 skb_queue_len(&chan
->srej_q
));
6303 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6305 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6306 l2cap_seq_list_pop(&chan
->srej_list
);
6308 l2cap_pass_to_tx(chan
, control
);
6309 skb_queue_tail(&chan
->srej_q
, skb
);
6311 BT_DBG("Queued %p (queue len %d)", skb
,
6312 skb_queue_len(&chan
->srej_q
));
6314 err
= l2cap_rx_queued_iframes(chan
);
6319 case L2CAP_TXSEQ_UNEXPECTED
:
6320 /* Got a frame that can't be reassembled yet.
6321 * Save it for later, and send SREJs to cover
6322 * the missing frames.
6324 skb_queue_tail(&chan
->srej_q
, skb
);
6326 BT_DBG("Queued %p (queue len %d)", skb
,
6327 skb_queue_len(&chan
->srej_q
));
6329 l2cap_pass_to_tx(chan
, control
);
6330 l2cap_send_srej(chan
, control
->txseq
);
6332 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6333 /* This frame was requested with an SREJ, but
6334 * some expected retransmitted frames are
6335 * missing. Request retransmission of missing
6338 skb_queue_tail(&chan
->srej_q
, skb
);
6340 BT_DBG("Queued %p (queue len %d)", skb
,
6341 skb_queue_len(&chan
->srej_q
));
6343 l2cap_pass_to_tx(chan
, control
);
6344 l2cap_send_srej_list(chan
, control
->txseq
);
6346 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6347 /* We've already queued this frame. Drop this copy. */
6348 l2cap_pass_to_tx(chan
, control
);
6350 case L2CAP_TXSEQ_DUPLICATE
:
6351 /* Expecting a later sequence number, so this frame
6352 * was already received. Ignore it completely.
6355 case L2CAP_TXSEQ_INVALID_IGNORE
:
6357 case L2CAP_TXSEQ_INVALID
:
6359 l2cap_send_disconn_req(chan
, ECONNRESET
);
6363 case L2CAP_EV_RECV_RR
:
6364 l2cap_pass_to_tx(chan
, control
);
6365 if (control
->final
) {
6366 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6368 if (!test_and_clear_bit(CONN_REJ_ACT
,
6369 &chan
->conn_state
)) {
6371 l2cap_retransmit_all(chan
, control
);
6374 l2cap_ertm_send(chan
);
6375 } else if (control
->poll
) {
6376 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6377 &chan
->conn_state
) &&
6378 chan
->unacked_frames
) {
6379 __set_retrans_timer(chan
);
6382 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6383 l2cap_send_srej_tail(chan
);
6385 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6386 &chan
->conn_state
) &&
6387 chan
->unacked_frames
)
6388 __set_retrans_timer(chan
);
6390 l2cap_send_ack(chan
);
6393 case L2CAP_EV_RECV_RNR
:
6394 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6395 l2cap_pass_to_tx(chan
, control
);
6396 if (control
->poll
) {
6397 l2cap_send_srej_tail(chan
);
6399 struct l2cap_ctrl rr_control
;
6400 memset(&rr_control
, 0, sizeof(rr_control
));
6401 rr_control
.sframe
= 1;
6402 rr_control
.super
= L2CAP_SUPER_RR
;
6403 rr_control
.reqseq
= chan
->buffer_seq
;
6404 l2cap_send_sframe(chan
, &rr_control
);
6408 case L2CAP_EV_RECV_REJ
:
6409 l2cap_handle_rej(chan
, control
);
6411 case L2CAP_EV_RECV_SREJ
:
6412 l2cap_handle_srej(chan
, control
);
6416 if (skb
&& !skb_in_use
) {
6417 BT_DBG("Freeing %p", skb
);
6424 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6426 BT_DBG("chan %p", chan
);
6428 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6431 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6433 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6435 return l2cap_resegment(chan
);
6438 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6439 struct l2cap_ctrl
*control
,
6440 struct sk_buff
*skb
, u8 event
)
6444 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6450 l2cap_process_reqseq(chan
, control
->reqseq
);
6452 if (!skb_queue_empty(&chan
->tx_q
))
6453 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6455 chan
->tx_send_head
= NULL
;
6457 /* Rewind next_tx_seq to the point expected
6460 chan
->next_tx_seq
= control
->reqseq
;
6461 chan
->unacked_frames
= 0;
6463 err
= l2cap_finish_move(chan
);
6467 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6468 l2cap_send_i_or_rr_or_rnr(chan
);
6470 if (event
== L2CAP_EV_RECV_IFRAME
)
6473 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6476 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6477 struct l2cap_ctrl
*control
,
6478 struct sk_buff
*skb
, u8 event
)
6482 if (!control
->final
)
6485 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6487 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6488 l2cap_process_reqseq(chan
, control
->reqseq
);
6490 if (!skb_queue_empty(&chan
->tx_q
))
6491 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6493 chan
->tx_send_head
= NULL
;
6495 /* Rewind next_tx_seq to the point expected
6498 chan
->next_tx_seq
= control
->reqseq
;
6499 chan
->unacked_frames
= 0;
6502 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6504 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6506 err
= l2cap_resegment(chan
);
6509 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6514 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6516 /* Make sure reqseq is for a packet that has been sent but not acked */
6519 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6520 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6523 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6524 struct sk_buff
*skb
, u8 event
)
6528 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6529 control
, skb
, event
, chan
->rx_state
);
6531 if (__valid_reqseq(chan
, control
->reqseq
)) {
6532 switch (chan
->rx_state
) {
6533 case L2CAP_RX_STATE_RECV
:
6534 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6536 case L2CAP_RX_STATE_SREJ_SENT
:
6537 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6540 case L2CAP_RX_STATE_WAIT_P
:
6541 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6543 case L2CAP_RX_STATE_WAIT_F
:
6544 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6551 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6552 control
->reqseq
, chan
->next_tx_seq
,
6553 chan
->expected_ack_seq
);
6554 l2cap_send_disconn_req(chan
, ECONNRESET
);
6560 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6561 struct sk_buff
*skb
)
6563 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6566 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6567 L2CAP_TXSEQ_EXPECTED
) {
6568 l2cap_pass_to_tx(chan
, control
);
6570 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6571 __next_seq(chan
, chan
->buffer_seq
));
6573 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6575 l2cap_reassemble_sdu(chan
, skb
, control
);
6578 kfree_skb(chan
->sdu
);
6581 chan
->sdu_last_frag
= NULL
;
6585 BT_DBG("Freeing %p", skb
);
6590 chan
->last_acked_seq
= control
->txseq
;
6591 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6596 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6598 struct l2cap_ctrl
*control
= &bt_cb(skb
)->l2cap
;
6602 __unpack_control(chan
, skb
);
6607 * We can just drop the corrupted I-frame here.
6608 * Receiver will miss it and start proper recovery
6609 * procedures and ask for retransmission.
6611 if (l2cap_check_fcs(chan
, skb
))
6614 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6615 len
-= L2CAP_SDULEN_SIZE
;
6617 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6618 len
-= L2CAP_FCS_SIZE
;
6620 if (len
> chan
->mps
) {
6621 l2cap_send_disconn_req(chan
, ECONNRESET
);
6625 if ((chan
->mode
== L2CAP_MODE_ERTM
||
6626 chan
->mode
== L2CAP_MODE_STREAMING
) && sk_filter(chan
->data
, skb
))
6629 if (!control
->sframe
) {
6632 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6633 control
->sar
, control
->reqseq
, control
->final
,
6636 /* Validate F-bit - F=0 always valid, F=1 only
6637 * valid in TX WAIT_F
6639 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6642 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6643 event
= L2CAP_EV_RECV_IFRAME
;
6644 err
= l2cap_rx(chan
, control
, skb
, event
);
6646 err
= l2cap_stream_rx(chan
, control
, skb
);
6650 l2cap_send_disconn_req(chan
, ECONNRESET
);
6652 const u8 rx_func_to_event
[4] = {
6653 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6654 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6657 /* Only I-frames are expected in streaming mode */
6658 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6661 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6662 control
->reqseq
, control
->final
, control
->poll
,
6666 BT_ERR("Trailing bytes: %d in sframe", len
);
6667 l2cap_send_disconn_req(chan
, ECONNRESET
);
6671 /* Validate F and P bits */
6672 if (control
->final
&& (control
->poll
||
6673 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6676 event
= rx_func_to_event
[control
->super
];
6677 if (l2cap_rx(chan
, control
, skb
, event
))
6678 l2cap_send_disconn_req(chan
, ECONNRESET
);
6688 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6690 struct l2cap_conn
*conn
= chan
->conn
;
6691 struct l2cap_le_credits pkt
;
6694 /* We return more credits to the sender only after the amount of
6695 * credits falls below half of the initial amount.
6697 if (chan
->rx_credits
>= (le_max_credits
+ 1) / 2)
6700 return_credits
= le_max_credits
- chan
->rx_credits
;
6702 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6704 chan
->rx_credits
+= return_credits
;
6706 pkt
.cid
= cpu_to_le16(chan
->scid
);
6707 pkt
.credits
= cpu_to_le16(return_credits
);
6709 chan
->ident
= l2cap_get_ident(conn
);
6711 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
6714 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6718 if (!chan
->rx_credits
) {
6719 BT_ERR("No credits to receive LE L2CAP data");
6720 l2cap_send_disconn_req(chan
, ECONNRESET
);
6724 if (chan
->imtu
< skb
->len
) {
6725 BT_ERR("Too big LE L2CAP PDU");
6730 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6732 l2cap_chan_le_send_credits(chan
);
6739 sdu_len
= get_unaligned_le16(skb
->data
);
6740 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6742 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6743 sdu_len
, skb
->len
, chan
->imtu
);
6745 if (sdu_len
> chan
->imtu
) {
6746 BT_ERR("Too big LE L2CAP SDU length received");
6751 if (skb
->len
> sdu_len
) {
6752 BT_ERR("Too much LE L2CAP data received");
6757 if (skb
->len
== sdu_len
)
6758 return chan
->ops
->recv(chan
, skb
);
6761 chan
->sdu_len
= sdu_len
;
6762 chan
->sdu_last_frag
= skb
;
6767 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6768 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6770 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6771 BT_ERR("Too much LE L2CAP data received");
6776 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6779 if (chan
->sdu
->len
== chan
->sdu_len
) {
6780 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6783 chan
->sdu_last_frag
= NULL
;
6791 kfree_skb(chan
->sdu
);
6793 chan
->sdu_last_frag
= NULL
;
6797 /* We can't return an error here since we took care of the skb
6798 * freeing internally. An error return would cause the caller to
6799 * do a double-free of the skb.
6804 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6805 struct sk_buff
*skb
)
6807 struct l2cap_chan
*chan
;
6809 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6811 if (cid
== L2CAP_CID_A2MP
) {
6812 chan
= a2mp_channel_create(conn
, skb
);
6818 l2cap_chan_lock(chan
);
6820 BT_DBG("unknown cid 0x%4.4x", cid
);
6821 /* Drop packet and return */
6827 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6829 /* If we receive data on a fixed channel before the info req/rsp
6830 * procdure is done simply assume that the channel is supported
6831 * and mark it as ready.
6833 if (chan
->chan_type
== L2CAP_CHAN_FIXED
)
6834 l2cap_chan_ready(chan
);
6836 if (chan
->state
!= BT_CONNECTED
)
6839 switch (chan
->mode
) {
6840 case L2CAP_MODE_LE_FLOWCTL
:
6841 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6846 case L2CAP_MODE_BASIC
:
6847 /* If socket recv buffers overflows we drop data here
6848 * which is *bad* because L2CAP has to be reliable.
6849 * But we don't have any other choice. L2CAP doesn't
6850 * provide flow control mechanism. */
6852 if (chan
->imtu
< skb
->len
) {
6853 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6857 if (!chan
->ops
->recv(chan
, skb
))
6861 case L2CAP_MODE_ERTM
:
6862 case L2CAP_MODE_STREAMING
:
6863 l2cap_data_rcv(chan
, skb
);
6867 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6875 l2cap_chan_unlock(chan
);
6878 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6879 struct sk_buff
*skb
)
6881 struct hci_conn
*hcon
= conn
->hcon
;
6882 struct l2cap_chan
*chan
;
6884 if (hcon
->type
!= ACL_LINK
)
6887 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6892 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6894 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6897 if (chan
->imtu
< skb
->len
)
6900 /* Store remote BD_ADDR and PSM for msg_name */
6901 bacpy(&bt_cb(skb
)->l2cap
.bdaddr
, &hcon
->dst
);
6902 bt_cb(skb
)->l2cap
.psm
= psm
;
6904 if (!chan
->ops
->recv(chan
, skb
)) {
6905 l2cap_chan_put(chan
);
6910 l2cap_chan_put(chan
);
6915 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6917 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6918 struct hci_conn
*hcon
= conn
->hcon
;
6922 if (hcon
->state
!= BT_CONNECTED
) {
6923 BT_DBG("queueing pending rx skb");
6924 skb_queue_tail(&conn
->pending_rx
, skb
);
6928 skb_pull(skb
, L2CAP_HDR_SIZE
);
6929 cid
= __le16_to_cpu(lh
->cid
);
6930 len
= __le16_to_cpu(lh
->len
);
6932 if (len
!= skb
->len
) {
6937 /* Since we can't actively block incoming LE connections we must
6938 * at least ensure that we ignore incoming data from them.
6940 if (hcon
->type
== LE_LINK
&&
6941 hci_bdaddr_list_lookup(&hcon
->hdev
->blacklist
, &hcon
->dst
,
6942 bdaddr_dst_type(hcon
))) {
6947 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6950 case L2CAP_CID_SIGNALING
:
6951 l2cap_sig_channel(conn
, skb
);
6954 case L2CAP_CID_CONN_LESS
:
6955 psm
= get_unaligned((__le16
*) skb
->data
);
6956 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6957 l2cap_conless_channel(conn
, psm
, skb
);
6960 case L2CAP_CID_LE_SIGNALING
:
6961 l2cap_le_sig_channel(conn
, skb
);
6965 l2cap_data_channel(conn
, cid
, skb
);
6970 static void process_pending_rx(struct work_struct
*work
)
6972 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
6974 struct sk_buff
*skb
;
6978 while ((skb
= skb_dequeue(&conn
->pending_rx
)))
6979 l2cap_recv_frame(conn
, skb
);
6982 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
6984 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6985 struct hci_chan
*hchan
;
6990 hchan
= hci_chan_create(hcon
);
6994 conn
= kzalloc(sizeof(*conn
), GFP_KERNEL
);
6996 hci_chan_del(hchan
);
7000 kref_init(&conn
->ref
);
7001 hcon
->l2cap_data
= conn
;
7002 conn
->hcon
= hci_conn_get(hcon
);
7003 conn
->hchan
= hchan
;
7005 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
7007 switch (hcon
->type
) {
7009 if (hcon
->hdev
->le_mtu
) {
7010 conn
->mtu
= hcon
->hdev
->le_mtu
;
7015 conn
->mtu
= hcon
->hdev
->acl_mtu
;
7019 conn
->feat_mask
= 0;
7021 conn
->local_fixed_chan
= L2CAP_FC_SIG_BREDR
| L2CAP_FC_CONNLESS
;
7023 if (hcon
->type
== ACL_LINK
&&
7024 hci_dev_test_flag(hcon
->hdev
, HCI_HS_ENABLED
))
7025 conn
->local_fixed_chan
|= L2CAP_FC_A2MP
;
7027 if (hci_dev_test_flag(hcon
->hdev
, HCI_LE_ENABLED
) &&
7028 (bredr_sc_enabled(hcon
->hdev
) ||
7029 hci_dev_test_flag(hcon
->hdev
, HCI_FORCE_BREDR_SMP
)))
7030 conn
->local_fixed_chan
|= L2CAP_FC_SMP_BREDR
;
7032 mutex_init(&conn
->ident_lock
);
7033 mutex_init(&conn
->chan_lock
);
7035 INIT_LIST_HEAD(&conn
->chan_l
);
7036 INIT_LIST_HEAD(&conn
->users
);
7038 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
7040 skb_queue_head_init(&conn
->pending_rx
);
7041 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
7042 INIT_WORK(&conn
->id_addr_update_work
, l2cap_conn_update_id_addr
);
7044 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
7049 static bool is_valid_psm(u16 psm
, u8 dst_type
) {
7053 if (bdaddr_type_is_le(dst_type
))
7054 return (psm
<= 0x00ff);
7056 /* PSM must be odd and lsb of upper byte must be 0 */
7057 return ((psm
& 0x0101) == 0x0001);
7060 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
7061 bdaddr_t
*dst
, u8 dst_type
)
7063 struct l2cap_conn
*conn
;
7064 struct hci_conn
*hcon
;
7065 struct hci_dev
*hdev
;
7068 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
7069 dst_type
, __le16_to_cpu(psm
));
7071 hdev
= hci_get_route(dst
, &chan
->src
, chan
->src_type
);
7073 return -EHOSTUNREACH
;
7077 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
7078 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
7083 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
7088 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
7093 switch (chan
->mode
) {
7094 case L2CAP_MODE_BASIC
:
7096 case L2CAP_MODE_LE_FLOWCTL
:
7097 l2cap_le_flowctl_init(chan
);
7099 case L2CAP_MODE_ERTM
:
7100 case L2CAP_MODE_STREAMING
:
7109 switch (chan
->state
) {
7113 /* Already connecting */
7118 /* Already connected */
7132 /* Set destination address and psm */
7133 bacpy(&chan
->dst
, dst
);
7134 chan
->dst_type
= dst_type
;
7139 if (bdaddr_type_is_le(dst_type
)) {
7140 /* Convert from L2CAP channel address type to HCI address type
7142 if (dst_type
== BDADDR_LE_PUBLIC
)
7143 dst_type
= ADDR_LE_DEV_PUBLIC
;
7145 dst_type
= ADDR_LE_DEV_RANDOM
;
7147 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
7148 hcon
= hci_connect_le(hdev
, dst
, dst_type
,
7150 HCI_LE_CONN_TIMEOUT
,
7153 hcon
= hci_connect_le_scan(hdev
, dst
, dst_type
,
7155 HCI_LE_CONN_TIMEOUT
);
7158 u8 auth_type
= l2cap_get_auth_type(chan
);
7159 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7163 err
= PTR_ERR(hcon
);
7167 conn
= l2cap_conn_add(hcon
);
7169 hci_conn_drop(hcon
);
7174 mutex_lock(&conn
->chan_lock
);
7175 l2cap_chan_lock(chan
);
7177 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7178 hci_conn_drop(hcon
);
7183 /* Update source addr of the socket */
7184 bacpy(&chan
->src
, &hcon
->src
);
7185 chan
->src_type
= bdaddr_src_type(hcon
);
7187 __l2cap_chan_add(conn
, chan
);
7189 /* l2cap_chan_add takes its own ref so we can drop this one */
7190 hci_conn_drop(hcon
);
7192 l2cap_state_change(chan
, BT_CONNECT
);
7193 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7195 /* Release chan->sport so that it can be reused by other
7196 * sockets (as it's only used for listening sockets).
7198 write_lock(&chan_list_lock
);
7200 write_unlock(&chan_list_lock
);
7202 if (hcon
->state
== BT_CONNECTED
) {
7203 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7204 __clear_chan_timer(chan
);
7205 if (l2cap_chan_check_security(chan
, true))
7206 l2cap_state_change(chan
, BT_CONNECTED
);
7208 l2cap_do_start(chan
);
7214 l2cap_chan_unlock(chan
);
7215 mutex_unlock(&conn
->chan_lock
);
7217 hci_dev_unlock(hdev
);
7221 EXPORT_SYMBOL_GPL(l2cap_chan_connect
);
7223 /* ---- L2CAP interface with lower layer (HCI) ---- */
7225 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7227 int exact
= 0, lm1
= 0, lm2
= 0;
7228 struct l2cap_chan
*c
;
7230 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7232 /* Find listening sockets and check their link_mode */
7233 read_lock(&chan_list_lock
);
7234 list_for_each_entry(c
, &chan_list
, global_l
) {
7235 if (c
->state
!= BT_LISTEN
)
7238 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7239 lm1
|= HCI_LM_ACCEPT
;
7240 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7241 lm1
|= HCI_LM_MASTER
;
7243 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7244 lm2
|= HCI_LM_ACCEPT
;
7245 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7246 lm2
|= HCI_LM_MASTER
;
7249 read_unlock(&chan_list_lock
);
7251 return exact
? lm1
: lm2
;
7254 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7255 * from an existing channel in the list or from the beginning of the
7256 * global list (by passing NULL as first parameter).
7258 static struct l2cap_chan
*l2cap_global_fixed_chan(struct l2cap_chan
*c
,
7259 struct hci_conn
*hcon
)
7261 u8 src_type
= bdaddr_src_type(hcon
);
7263 read_lock(&chan_list_lock
);
7266 c
= list_next_entry(c
, global_l
);
7268 c
= list_entry(chan_list
.next
, typeof(*c
), global_l
);
7270 list_for_each_entry_from(c
, &chan_list
, global_l
) {
7271 if (c
->chan_type
!= L2CAP_CHAN_FIXED
)
7273 if (c
->state
!= BT_LISTEN
)
7275 if (bacmp(&c
->src
, &hcon
->src
) && bacmp(&c
->src
, BDADDR_ANY
))
7277 if (src_type
!= c
->src_type
)
7281 read_unlock(&chan_list_lock
);
7285 read_unlock(&chan_list_lock
);
7290 static void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7292 struct hci_dev
*hdev
= hcon
->hdev
;
7293 struct l2cap_conn
*conn
;
7294 struct l2cap_chan
*pchan
;
7297 if (hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
)
7300 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7303 l2cap_conn_del(hcon
, bt_to_errno(status
));
7307 conn
= l2cap_conn_add(hcon
);
7311 dst_type
= bdaddr_dst_type(hcon
);
7313 /* If device is blocked, do not create channels for it */
7314 if (hci_bdaddr_list_lookup(&hdev
->blacklist
, &hcon
->dst
, dst_type
))
7317 /* Find fixed channels and notify them of the new connection. We
7318 * use multiple individual lookups, continuing each time where
7319 * we left off, because the list lock would prevent calling the
7320 * potentially sleeping l2cap_chan_lock() function.
7322 pchan
= l2cap_global_fixed_chan(NULL
, hcon
);
7324 struct l2cap_chan
*chan
, *next
;
7326 /* Client fixed channels should override server ones */
7327 if (__l2cap_get_chan_by_dcid(conn
, pchan
->scid
))
7330 l2cap_chan_lock(pchan
);
7331 chan
= pchan
->ops
->new_connection(pchan
);
7333 bacpy(&chan
->src
, &hcon
->src
);
7334 bacpy(&chan
->dst
, &hcon
->dst
);
7335 chan
->src_type
= bdaddr_src_type(hcon
);
7336 chan
->dst_type
= dst_type
;
7338 __l2cap_chan_add(conn
, chan
);
7341 l2cap_chan_unlock(pchan
);
7343 next
= l2cap_global_fixed_chan(pchan
, hcon
);
7344 l2cap_chan_put(pchan
);
7348 l2cap_conn_ready(conn
);
7351 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7353 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7355 BT_DBG("hcon %p", hcon
);
7358 return HCI_ERROR_REMOTE_USER_TERM
;
7359 return conn
->disc_reason
;
7362 static void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7364 if (hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
)
7367 BT_DBG("hcon %p reason %d", hcon
, reason
);
7369 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7372 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7374 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7377 if (encrypt
== 0x00) {
7378 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7379 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7380 } else if (chan
->sec_level
== BT_SECURITY_HIGH
||
7381 chan
->sec_level
== BT_SECURITY_FIPS
)
7382 l2cap_chan_close(chan
, ECONNREFUSED
);
7384 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7385 __clear_chan_timer(chan
);
7389 static void l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7391 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7392 struct l2cap_chan
*chan
;
7397 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7399 mutex_lock(&conn
->chan_lock
);
7401 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7402 l2cap_chan_lock(chan
);
7404 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7405 state_to_string(chan
->state
));
7407 if (chan
->scid
== L2CAP_CID_A2MP
) {
7408 l2cap_chan_unlock(chan
);
7412 if (!status
&& encrypt
)
7413 chan
->sec_level
= hcon
->sec_level
;
7415 if (!__l2cap_no_conn_pending(chan
)) {
7416 l2cap_chan_unlock(chan
);
7420 if (!status
&& (chan
->state
== BT_CONNECTED
||
7421 chan
->state
== BT_CONFIG
)) {
7422 chan
->ops
->resume(chan
);
7423 l2cap_check_encryption(chan
, encrypt
);
7424 l2cap_chan_unlock(chan
);
7428 if (chan
->state
== BT_CONNECT
) {
7430 l2cap_start_connection(chan
);
7432 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7433 } else if (chan
->state
== BT_CONNECT2
&&
7434 chan
->mode
!= L2CAP_MODE_LE_FLOWCTL
) {
7435 struct l2cap_conn_rsp rsp
;
7439 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7440 res
= L2CAP_CR_PEND
;
7441 stat
= L2CAP_CS_AUTHOR_PEND
;
7442 chan
->ops
->defer(chan
);
7444 l2cap_state_change(chan
, BT_CONFIG
);
7445 res
= L2CAP_CR_SUCCESS
;
7446 stat
= L2CAP_CS_NO_INFO
;
7449 l2cap_state_change(chan
, BT_DISCONN
);
7450 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7451 res
= L2CAP_CR_SEC_BLOCK
;
7452 stat
= L2CAP_CS_NO_INFO
;
7455 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7456 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7457 rsp
.result
= cpu_to_le16(res
);
7458 rsp
.status
= cpu_to_le16(stat
);
7459 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7462 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7463 res
== L2CAP_CR_SUCCESS
) {
7465 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7466 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7468 l2cap_build_conf_req(chan
, buf
),
7470 chan
->num_conf_req
++;
7474 l2cap_chan_unlock(chan
);
7477 mutex_unlock(&conn
->chan_lock
);
7480 void l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7482 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7483 struct l2cap_hdr
*hdr
;
7486 /* For AMP controller do not create l2cap conn */
7487 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_PRIMARY
)
7491 conn
= l2cap_conn_add(hcon
);
7496 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7500 case ACL_START_NO_FLUSH
:
7503 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7504 kfree_skb(conn
->rx_skb
);
7505 conn
->rx_skb
= NULL
;
7507 l2cap_conn_unreliable(conn
, ECOMM
);
7510 /* Start fragment always begin with Basic L2CAP header */
7511 if (skb
->len
< L2CAP_HDR_SIZE
) {
7512 BT_ERR("Frame is too short (len %d)", skb
->len
);
7513 l2cap_conn_unreliable(conn
, ECOMM
);
7517 hdr
= (struct l2cap_hdr
*) skb
->data
;
7518 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7520 if (len
== skb
->len
) {
7521 /* Complete frame received */
7522 l2cap_recv_frame(conn
, skb
);
7526 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7528 if (skb
->len
> len
) {
7529 BT_ERR("Frame is too long (len %d, expected len %d)",
7531 l2cap_conn_unreliable(conn
, ECOMM
);
7535 /* Allocate skb for the complete frame (with header) */
7536 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7540 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7542 conn
->rx_len
= len
- skb
->len
;
7546 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7548 if (!conn
->rx_len
) {
7549 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7550 l2cap_conn_unreliable(conn
, ECOMM
);
7554 if (skb
->len
> conn
->rx_len
) {
7555 BT_ERR("Fragment is too long (len %d, expected %d)",
7556 skb
->len
, conn
->rx_len
);
7557 kfree_skb(conn
->rx_skb
);
7558 conn
->rx_skb
= NULL
;
7560 l2cap_conn_unreliable(conn
, ECOMM
);
7564 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7566 conn
->rx_len
-= skb
->len
;
7568 if (!conn
->rx_len
) {
7569 /* Complete frame received. l2cap_recv_frame
7570 * takes ownership of the skb so set the global
7571 * rx_skb pointer to NULL first.
7573 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7574 conn
->rx_skb
= NULL
;
7575 l2cap_recv_frame(conn
, rx_skb
);
7584 static struct hci_cb l2cap_cb
= {
7586 .connect_cfm
= l2cap_connect_cfm
,
7587 .disconn_cfm
= l2cap_disconn_cfm
,
7588 .security_cfm
= l2cap_security_cfm
,
7591 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7593 struct l2cap_chan
*c
;
7595 read_lock(&chan_list_lock
);
7597 list_for_each_entry(c
, &chan_list
, global_l
) {
7598 seq_printf(f
, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7599 &c
->src
, c
->src_type
, &c
->dst
, c
->dst_type
,
7600 c
->state
, __le16_to_cpu(c
->psm
),
7601 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7602 c
->sec_level
, c
->mode
);
7605 read_unlock(&chan_list_lock
);
7610 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7612 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7615 static const struct file_operations l2cap_debugfs_fops
= {
7616 .open
= l2cap_debugfs_open
,
7618 .llseek
= seq_lseek
,
7619 .release
= single_release
,
/* dentry of the "l2cap" debugfs file, created in l2cap_init(). */
static struct dentry *l2cap_debugfs;
7624 int __init
l2cap_init(void)
7628 err
= l2cap_init_sockets();
7632 hci_register_cb(&l2cap_cb
);
7634 if (IS_ERR_OR_NULL(bt_debugfs
))
7637 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7638 NULL
, &l2cap_debugfs_fops
);
7640 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs
,
7642 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs
,
7648 void l2cap_exit(void)
7650 debugfs_remove(l2cap_debugfs
);
7651 hci_unregister_cb(&l2cap_cb
);
7652 l2cap_cleanup_sockets();
7655 module_param(disable_ertm
, bool, 0644);
7656 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");