/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
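/* Module-level defaults for LE credit-based flow control
 * (L2CAP_MODE_LE_FLOWCTL); l2cap_le_flowctl_init() below, for example,
 * starts each new channel with rx_credits = le_max_credits.
 */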
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
{
	if (hcon->type == LE_LINK) {
		if (type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}
/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}

static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
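/* Illustrative example (not from the original source): with an allocated
 * size of 8 (mask 0x0007), appending txseqs 10, 11 and 13 yields
 *
 *	head = 10, tail = 13
 *	list[10 & 7] = 11, list[11 & 7] = 13, list[13 & 7] = L2CAP_SEQ_LIST_TAIL
 *
 * so l2cap_seq_list_contains() is a single array lookup and
 * l2cap_seq_list_pop() just follows one link from the head.
 */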
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) into a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
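	/* Example: a window size of 63 gives alloc_size =
	 * roundup_pow_of_two(63) = 64 and mask = 0x003f, so any 14-bit
	 * sequence number indexes the array via (seq & mask).
	 */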
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;

		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}

	return seq;
}

static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
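/* Typical use in this file: ERTM appends txseqs that must be retransmitted
 * to chan->retrans_list (see l2cap_retransmit() below) and later pops them
 * from the head in l2cap_ertm_resend(); received SREJ state is tracked the
 * same way in chan->srej_list.
 */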
405 static void l2cap_chan_timeout(struct work_struct
*work
)
407 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
409 struct l2cap_conn
*conn
= chan
->conn
;
412 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
414 mutex_lock(&conn
->chan_lock
);
415 l2cap_chan_lock(chan
);
417 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
418 reason
= ECONNREFUSED
;
419 else if (chan
->state
== BT_CONNECT
&&
420 chan
->sec_level
!= BT_SECURITY_SDP
)
421 reason
= ECONNREFUSED
;
425 l2cap_chan_close(chan
, reason
);
427 l2cap_chan_unlock(chan
);
429 chan
->ops
->close(chan
);
430 mutex_unlock(&conn
->chan_lock
);
432 l2cap_chan_put(chan
);
435 struct l2cap_chan
*l2cap_chan_create(void)
437 struct l2cap_chan
*chan
;
439 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
443 mutex_init(&chan
->lock
);
445 write_lock(&chan_list_lock
);
446 list_add(&chan
->global_l
, &chan_list
);
447 write_unlock(&chan_list_lock
);
449 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
451 chan
->state
= BT_OPEN
;
453 kref_init(&chan
->kref
);
455 /* This flag is cleared in l2cap_chan_ready() */
456 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
458 BT_DBG("chan %p", chan
);
463 static void l2cap_chan_destroy(struct kref
*kref
)
465 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
467 BT_DBG("chan %p", chan
);
469 write_lock(&chan_list_lock
);
470 list_del(&chan
->global_l
);
471 write_unlock(&chan_list_lock
);
476 void l2cap_chan_hold(struct l2cap_chan
*c
)
478 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
483 void l2cap_chan_put(struct l2cap_chan
*c
)
485 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
487 kref_put(&c
->kref
, l2cap_chan_destroy
);
490 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
492 chan
->fcs
= L2CAP_FCS_CRC16
;
493 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
494 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
495 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
496 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
497 chan
->sec_level
= BT_SECURITY_LOW
;
499 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
502 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
505 chan
->sdu_last_frag
= NULL
;
507 chan
->tx_credits
= 0;
508 chan
->rx_credits
= le_max_credits
;
509 chan
->mps
= min_t(u16
, chan
->imtu
, L2CAP_LE_DEFAULT_MPS
);
511 skb_queue_head_init(&chan
->tx_q
);
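/* __l2cap_chan_add() below binds a channel to a connection: it picks the
 * source CID (fixed CIDs for connectionless, ATT, A2MP and raw channels,
 * a dynamically allocated one otherwise), takes a reference on the channel
 * and on the underlying hci_conn, and links the channel into conn->chan_l.
 */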
514 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
516 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
517 __le16_to_cpu(chan
->psm
), chan
->dcid
);
519 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
523 switch (chan
->chan_type
) {
524 case L2CAP_CHAN_CONN_ORIENTED
:
525 if (conn
->hcon
->type
== LE_LINK
) {
526 if (chan
->dcid
== L2CAP_CID_ATT
) {
527 chan
->omtu
= L2CAP_DEFAULT_MTU
;
528 chan
->scid
= L2CAP_CID_ATT
;
530 chan
->scid
= l2cap_alloc_cid(conn
);
533 /* Alloc CID for connection-oriented socket */
534 chan
->scid
= l2cap_alloc_cid(conn
);
535 chan
->omtu
= L2CAP_DEFAULT_MTU
;
539 case L2CAP_CHAN_CONN_LESS
:
540 /* Connectionless socket */
541 chan
->scid
= L2CAP_CID_CONN_LESS
;
542 chan
->dcid
= L2CAP_CID_CONN_LESS
;
543 chan
->omtu
= L2CAP_DEFAULT_MTU
;
546 case L2CAP_CHAN_CONN_FIX_A2MP
:
547 chan
->scid
= L2CAP_CID_A2MP
;
548 chan
->dcid
= L2CAP_CID_A2MP
;
549 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
550 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
554 /* Raw socket can send/recv signalling messages only */
555 chan
->scid
= L2CAP_CID_SIGNALING
;
556 chan
->dcid
= L2CAP_CID_SIGNALING
;
557 chan
->omtu
= L2CAP_DEFAULT_MTU
;
560 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
561 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
562 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
563 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
564 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
565 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
567 l2cap_chan_hold(chan
);
569 hci_conn_hold(conn
->hcon
);
571 list_add(&chan
->list
, &conn
->chan_l
);
574 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
576 mutex_lock(&conn
->chan_lock
);
577 __l2cap_chan_add(conn
, chan
);
578 mutex_unlock(&conn
->chan_lock
);
581 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
583 struct l2cap_conn
*conn
= chan
->conn
;
585 __clear_chan_timer(chan
);
587 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
590 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
591 /* Delete from channel list */
592 list_del(&chan
->list
);
594 l2cap_chan_put(chan
);
598 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
599 hci_conn_drop(conn
->hcon
);
601 if (mgr
&& mgr
->bredr_chan
== chan
)
602 mgr
->bredr_chan
= NULL
;
605 if (chan
->hs_hchan
) {
606 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
608 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
609 amp_disconnect_logical_link(hs_hchan
);
612 chan
->ops
->teardown(chan
, err
);
614 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
618 case L2CAP_MODE_BASIC
:
621 case L2CAP_MODE_LE_FLOWCTL
:
622 skb_queue_purge(&chan
->tx_q
);
625 case L2CAP_MODE_ERTM
:
626 __clear_retrans_timer(chan
);
627 __clear_monitor_timer(chan
);
628 __clear_ack_timer(chan
);
630 skb_queue_purge(&chan
->srej_q
);
632 l2cap_seq_list_free(&chan
->srej_list
);
633 l2cap_seq_list_free(&chan
->retrans_list
);
637 case L2CAP_MODE_STREAMING
:
638 skb_queue_purge(&chan
->tx_q
);
645 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
647 struct l2cap_conn
*conn
= chan
->conn
;
648 struct l2cap_le_conn_rsp rsp
;
651 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
652 result
= L2CAP_CR_AUTHORIZATION
;
654 result
= L2CAP_CR_BAD_PSM
;
656 l2cap_state_change(chan
, BT_DISCONN
);
658 rsp
.dcid
= cpu_to_le16(chan
->scid
);
659 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
660 rsp
.mps
= cpu_to_le16(chan
->mps
);
661 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
662 rsp
.result
= cpu_to_le16(result
);
664 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
668 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
670 struct l2cap_conn
*conn
= chan
->conn
;
671 struct l2cap_conn_rsp rsp
;
674 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
675 result
= L2CAP_CR_SEC_BLOCK
;
677 result
= L2CAP_CR_BAD_PSM
;
679 l2cap_state_change(chan
, BT_DISCONN
);
681 rsp
.scid
= cpu_to_le16(chan
->dcid
);
682 rsp
.dcid
= cpu_to_le16(chan
->scid
);
683 rsp
.result
= cpu_to_le16(result
);
684 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
686 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
689 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
691 struct l2cap_conn
*conn
= chan
->conn
;
693 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
695 switch (chan
->state
) {
697 chan
->ops
->teardown(chan
, 0);
		/* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
		 * check for chan->psm.
		 */
705 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& chan
->psm
) {
706 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
707 l2cap_send_disconn_req(chan
, reason
);
709 l2cap_chan_del(chan
, reason
);
713 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
714 if (conn
->hcon
->type
== ACL_LINK
)
715 l2cap_chan_connect_reject(chan
);
716 else if (conn
->hcon
->type
== LE_LINK
)
717 l2cap_chan_le_connect_reject(chan
);
720 l2cap_chan_del(chan
, reason
);
725 l2cap_chan_del(chan
, reason
);
729 chan
->ops
->teardown(chan
, 0);
734 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
736 switch (chan
->chan_type
) {
738 switch (chan
->sec_level
) {
739 case BT_SECURITY_HIGH
:
740 return HCI_AT_DEDICATED_BONDING_MITM
;
741 case BT_SECURITY_MEDIUM
:
742 return HCI_AT_DEDICATED_BONDING
;
744 return HCI_AT_NO_BONDING
;
747 case L2CAP_CHAN_CONN_LESS
:
748 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_3DSP
)) {
749 if (chan
->sec_level
== BT_SECURITY_LOW
)
750 chan
->sec_level
= BT_SECURITY_SDP
;
752 if (chan
->sec_level
== BT_SECURITY_HIGH
)
753 return HCI_AT_NO_BONDING_MITM
;
755 return HCI_AT_NO_BONDING
;
757 case L2CAP_CHAN_CONN_ORIENTED
:
758 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
759 if (chan
->sec_level
== BT_SECURITY_LOW
)
760 chan
->sec_level
= BT_SECURITY_SDP
;
762 if (chan
->sec_level
== BT_SECURITY_HIGH
)
763 return HCI_AT_NO_BONDING_MITM
;
765 return HCI_AT_NO_BONDING
;
769 switch (chan
->sec_level
) {
770 case BT_SECURITY_HIGH
:
771 return HCI_AT_GENERAL_BONDING_MITM
;
772 case BT_SECURITY_MEDIUM
:
773 return HCI_AT_GENERAL_BONDING
;
775 return HCI_AT_NO_BONDING
;
781 /* Service level security */
782 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
784 struct l2cap_conn
*conn
= chan
->conn
;
787 if (conn
->hcon
->type
== LE_LINK
)
788 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
790 auth_type
= l2cap_get_auth_type(chan
);
792 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
795 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
	/* Get next available identifier.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */
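	/* The counter below therefore wraps within the kernel range: once
	 * tx_ident passes 128 it starts over at the bottom of that range,
	 * so identifier 0 is never handed out for kernel-originated requests.
	 */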
805 spin_lock(&conn
->lock
);
807 if (++conn
->tx_ident
> 128)
812 spin_unlock(&conn
->lock
);
817 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
820 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
823 BT_DBG("code 0x%2.2x", code
);
828 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
829 flags
= ACL_START_NO_FLUSH
;
833 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
834 skb
->priority
= HCI_PRIO_MAX
;
836 hci_send_acl(conn
->hchan
, skb
, flags
);
839 static bool __chan_is_moving(struct l2cap_chan
*chan
)
841 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
842 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
845 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
847 struct hci_conn
*hcon
= chan
->conn
->hcon
;
850 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
853 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
855 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
862 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
863 lmp_no_flush_capable(hcon
->hdev
))
864 flags
= ACL_START_NO_FLUSH
;
868 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
869 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
872 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
874 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
875 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
877 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
880 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
881 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
888 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
889 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
896 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
898 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
899 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
901 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
904 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
905 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
912 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
913 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
920 static inline void __unpack_control(struct l2cap_chan
*chan
,
923 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
924 __unpack_extended_control(get_unaligned_le32(skb
->data
),
925 &bt_cb(skb
)->control
);
926 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
928 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
929 &bt_cb(skb
)->control
);
930 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
934 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
938 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
939 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
941 if (control
->sframe
) {
942 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
943 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
944 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
946 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
947 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
953 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
957 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
958 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
960 if (control
->sframe
) {
961 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
962 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
963 packed
|= L2CAP_CTRL_FRAME_TYPE
;
965 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
966 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
972 static inline void __pack_control(struct l2cap_chan
*chan
,
973 struct l2cap_ctrl
*control
,
976 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
977 put_unaligned_le32(__pack_extended_control(control
),
978 skb
->data
+ L2CAP_HDR_SIZE
);
980 put_unaligned_le16(__pack_enhanced_control(control
),
981 skb
->data
+ L2CAP_HDR_SIZE
);
985 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
987 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
988 return L2CAP_EXT_HDR_SIZE
;
990 return L2CAP_ENH_HDR_SIZE
;
993 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
997 struct l2cap_hdr
*lh
;
998 int hlen
= __ertm_hdr_size(chan
);
1000 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1001 hlen
+= L2CAP_FCS_SIZE
;
1003 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1006 return ERR_PTR(-ENOMEM
);
1008 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1009 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1010 lh
->cid
= cpu_to_le16(chan
->dcid
);
1012 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1013 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1015 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1017 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1018 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1019 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1022 skb
->priority
= HCI_PRIO_MAX
;
1026 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1027 struct l2cap_ctrl
*control
)
1029 struct sk_buff
*skb
;
1032 BT_DBG("chan %p, control %p", chan
, control
);
1034 if (!control
->sframe
)
1037 if (__chan_is_moving(chan
))
1040 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1044 if (control
->super
== L2CAP_SUPER_RR
)
1045 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1046 else if (control
->super
== L2CAP_SUPER_RNR
)
1047 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1049 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1050 chan
->last_acked_seq
= control
->reqseq
;
1051 __clear_ack_timer(chan
);
1054 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1055 control
->final
, control
->poll
, control
->super
);
1057 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1058 control_field
= __pack_extended_control(control
);
1060 control_field
= __pack_enhanced_control(control
);
1062 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1064 l2cap_do_send(chan
, skb
);
1067 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1069 struct l2cap_ctrl control
;
1071 BT_DBG("chan %p, poll %d", chan
, poll
);
1073 memset(&control
, 0, sizeof(control
));
1075 control
.poll
= poll
;
1077 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1078 control
.super
= L2CAP_SUPER_RNR
;
1080 control
.super
= L2CAP_SUPER_RR
;
1082 control
.reqseq
= chan
->buffer_seq
;
1083 l2cap_send_sframe(chan
, &control
);
1086 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1088 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1091 static bool __amp_capable(struct l2cap_chan
*chan
)
1093 struct l2cap_conn
*conn
= chan
->conn
;
1094 struct hci_dev
*hdev
;
1095 bool amp_available
= false;
1097 if (!conn
->hs_enabled
)
1100 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1103 read_lock(&hci_dev_list_lock
);
1104 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1105 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1106 test_bit(HCI_UP
, &hdev
->flags
)) {
1107 amp_available
= true;
1111 read_unlock(&hci_dev_list_lock
);
1113 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1114 return amp_available
;
1119 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1121 /* Check EFS parameters */
1125 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1127 struct l2cap_conn
*conn
= chan
->conn
;
1128 struct l2cap_conn_req req
;
1130 req
.scid
= cpu_to_le16(chan
->scid
);
1131 req
.psm
= chan
->psm
;
1133 chan
->ident
= l2cap_get_ident(conn
);
1135 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1137 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1140 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1142 struct l2cap_create_chan_req req
;
1143 req
.scid
= cpu_to_le16(chan
->scid
);
1144 req
.psm
= chan
->psm
;
1145 req
.amp_id
= amp_id
;
1147 chan
->ident
= l2cap_get_ident(chan
->conn
);
1149 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1153 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1155 struct sk_buff
*skb
;
1157 BT_DBG("chan %p", chan
);
1159 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1162 __clear_retrans_timer(chan
);
1163 __clear_monitor_timer(chan
);
1164 __clear_ack_timer(chan
);
1166 chan
->retry_count
= 0;
1167 skb_queue_walk(&chan
->tx_q
, skb
) {
1168 if (bt_cb(skb
)->control
.retries
)
1169 bt_cb(skb
)->control
.retries
= 1;
1174 chan
->expected_tx_seq
= chan
->buffer_seq
;
1176 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1177 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1178 l2cap_seq_list_clear(&chan
->retrans_list
);
1179 l2cap_seq_list_clear(&chan
->srej_list
);
1180 skb_queue_purge(&chan
->srej_q
);
1182 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1183 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1185 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1188 static void l2cap_move_done(struct l2cap_chan
*chan
)
1190 u8 move_role
= chan
->move_role
;
1191 BT_DBG("chan %p", chan
);
1193 chan
->move_state
= L2CAP_MOVE_STABLE
;
1194 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1196 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1199 switch (move_role
) {
1200 case L2CAP_MOVE_ROLE_INITIATOR
:
1201 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1202 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1204 case L2CAP_MOVE_ROLE_RESPONDER
:
1205 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1210 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1212 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1213 chan
->conf_state
= 0;
1214 __clear_chan_timer(chan
);
1216 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1217 chan
->ops
->suspend(chan
);
1219 chan
->state
= BT_CONNECTED
;
1221 chan
->ops
->ready(chan
);
1224 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1226 struct l2cap_conn
*conn
= chan
->conn
;
1227 struct l2cap_le_conn_req req
;
1229 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1232 req
.psm
= chan
->psm
;
1233 req
.scid
= cpu_to_le16(chan
->scid
);
1234 req
.mtu
= cpu_to_le16(chan
->imtu
);
1235 req
.mps
= cpu_to_le16(chan
->mps
);
1236 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1238 chan
->ident
= l2cap_get_ident(conn
);
1240 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1244 static void l2cap_le_start(struct l2cap_chan
*chan
)
1246 struct l2cap_conn
*conn
= chan
->conn
;
1248 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1252 l2cap_chan_ready(chan
);
1256 if (chan
->state
== BT_CONNECT
)
1257 l2cap_le_connect(chan
);
1260 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1262 if (__amp_capable(chan
)) {
1263 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1264 a2mp_discover_amp(chan
);
1265 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1266 l2cap_le_start(chan
);
1268 l2cap_send_conn_req(chan
);
1272 static void l2cap_do_start(struct l2cap_chan
*chan
)
1274 struct l2cap_conn
*conn
= chan
->conn
;
1276 if (conn
->hcon
->type
== LE_LINK
) {
1277 l2cap_le_start(chan
);
1281 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1282 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1285 if (l2cap_chan_check_security(chan
) &&
1286 __l2cap_no_conn_pending(chan
)) {
1287 l2cap_start_connection(chan
);
1290 struct l2cap_info_req req
;
1291 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1293 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1294 conn
->info_ident
= l2cap_get_ident(conn
);
1296 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1298 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1303 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1305 u32 local_feat_mask
= l2cap_feat_mask
;
1307 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1310 case L2CAP_MODE_ERTM
:
1311 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1312 case L2CAP_MODE_STREAMING
:
1313 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1319 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1321 struct l2cap_conn
*conn
= chan
->conn
;
1322 struct l2cap_disconn_req req
;
1327 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1328 __clear_retrans_timer(chan
);
1329 __clear_monitor_timer(chan
);
1330 __clear_ack_timer(chan
);
1333 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1334 l2cap_state_change(chan
, BT_DISCONN
);
1338 req
.dcid
= cpu_to_le16(chan
->dcid
);
1339 req
.scid
= cpu_to_le16(chan
->scid
);
1340 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1343 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1346 /* ---- L2CAP connections ---- */
1347 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1349 struct l2cap_chan
*chan
, *tmp
;
1351 BT_DBG("conn %p", conn
);
1353 mutex_lock(&conn
->chan_lock
);
1355 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1356 l2cap_chan_lock(chan
);
1358 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1359 l2cap_chan_unlock(chan
);
1363 if (chan
->state
== BT_CONNECT
) {
1364 if (!l2cap_chan_check_security(chan
) ||
1365 !__l2cap_no_conn_pending(chan
)) {
1366 l2cap_chan_unlock(chan
);
1370 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1371 && test_bit(CONF_STATE2_DEVICE
,
1372 &chan
->conf_state
)) {
1373 l2cap_chan_close(chan
, ECONNRESET
);
1374 l2cap_chan_unlock(chan
);
1378 l2cap_start_connection(chan
);
1380 } else if (chan
->state
== BT_CONNECT2
) {
1381 struct l2cap_conn_rsp rsp
;
1383 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1384 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1386 if (l2cap_chan_check_security(chan
)) {
1387 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1388 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1389 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1390 chan
->ops
->defer(chan
);
1393 l2cap_state_change(chan
, BT_CONFIG
);
1394 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1395 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1398 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1399 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1402 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1405 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1406 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1407 l2cap_chan_unlock(chan
);
1411 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1412 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1413 l2cap_build_conf_req(chan
, buf
), buf
);
1414 chan
->num_conf_req
++;
1417 l2cap_chan_unlock(chan
);
1420 mutex_unlock(&conn
->chan_lock
);
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match, locked.
 */
1426 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1430 struct l2cap_chan
*c
, *c1
= NULL
;
1432 read_lock(&chan_list_lock
);
1434 list_for_each_entry(c
, &chan_list
, global_l
) {
1435 if (state
&& c
->state
!= state
)
1438 if (c
->scid
== cid
) {
1439 int src_match
, dst_match
;
1440 int src_any
, dst_any
;
1443 src_match
= !bacmp(&c
->src
, src
);
1444 dst_match
= !bacmp(&c
->dst
, dst
);
1445 if (src_match
&& dst_match
) {
1446 read_unlock(&chan_list_lock
);
1451 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1452 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1453 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1454 (src_any
&& dst_any
))
1459 read_unlock(&chan_list_lock
);
1464 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1466 struct hci_conn
*hcon
= conn
->hcon
;
1467 struct l2cap_chan
*chan
, *pchan
;
1472 bt_6lowpan_add_conn(conn
);
1474 /* Check if we have socket listening on cid */
1475 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_ATT
,
1476 &hcon
->src
, &hcon
->dst
);
1480 /* Client ATT sockets should override the server one */
1481 if (__l2cap_get_chan_by_dcid(conn
, L2CAP_CID_ATT
))
1484 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
1486 /* If device is blocked, do not create a channel for it */
1487 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, dst_type
))
1490 l2cap_chan_lock(pchan
);
1492 chan
= pchan
->ops
->new_connection(pchan
);
1496 chan
->dcid
= L2CAP_CID_ATT
;
1498 bacpy(&chan
->src
, &hcon
->src
);
1499 bacpy(&chan
->dst
, &hcon
->dst
);
1500 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1501 chan
->dst_type
= dst_type
;
1503 __l2cap_chan_add(conn
, chan
);
1506 l2cap_chan_unlock(pchan
);
1509 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1511 struct l2cap_chan
*chan
;
1512 struct hci_conn
*hcon
= conn
->hcon
;
1514 BT_DBG("conn %p", conn
);
	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
1519 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1520 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1522 mutex_lock(&conn
->chan_lock
);
1524 if (hcon
->type
== LE_LINK
)
1525 l2cap_le_conn_ready(conn
);
1527 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1529 l2cap_chan_lock(chan
);
1531 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1532 l2cap_chan_unlock(chan
);
1536 if (hcon
->type
== LE_LINK
) {
1537 l2cap_le_start(chan
);
1538 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1539 l2cap_chan_ready(chan
);
1541 } else if (chan
->state
== BT_CONNECT
) {
1542 l2cap_do_start(chan
);
1545 l2cap_chan_unlock(chan
);
1548 mutex_unlock(&conn
->chan_lock
);
/* Notify sockets that we cannot guarantee reliability anymore */
1552 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1554 struct l2cap_chan
*chan
;
1556 BT_DBG("conn %p", conn
);
1558 mutex_lock(&conn
->chan_lock
);
1560 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1561 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1562 l2cap_chan_set_err(chan
, err
);
1565 mutex_unlock(&conn
->chan_lock
);
1568 static void l2cap_info_timeout(struct work_struct
*work
)
1570 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1573 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1574 conn
->info_ident
= 0;
1576 l2cap_conn_start(conn
);
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
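/* Minimal usage sketch (illustrative, not part of this file): an external
 * module supplies ->probe/->remove callbacks in a struct l2cap_user and
 * registers it against a connection it holds a reference on.  The
 * my_probe/my_remove names below are hypothetical.
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;	// conn->hcon and conn->hchan are now safe to use
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// conn is going away; stop using it
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */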
1592 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1594 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too.
	 */
1606 if (user
->list
.next
|| user
->list
.prev
) {
1611 /* conn->hchan is NULL after l2cap_conn_del() was called */
1617 ret
= user
->probe(conn
, user
);
1621 list_add(&user
->list
, &conn
->users
);
1625 hci_dev_unlock(hdev
);
1628 EXPORT_SYMBOL(l2cap_register_user
);
1630 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1632 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1636 if (!user
->list
.next
|| !user
->list
.prev
)
1639 list_del(&user
->list
);
1640 user
->list
.next
= NULL
;
1641 user
->list
.prev
= NULL
;
1642 user
->remove(conn
, user
);
1645 hci_dev_unlock(hdev
);
1647 EXPORT_SYMBOL(l2cap_unregister_user
);
1649 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1651 struct l2cap_user
*user
;
1653 while (!list_empty(&conn
->users
)) {
1654 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1655 list_del(&user
->list
);
1656 user
->list
.next
= NULL
;
1657 user
->list
.prev
= NULL
;
1658 user
->remove(conn
, user
);
1662 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1664 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1665 struct l2cap_chan
*chan
, *l
;
1670 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1672 kfree_skb(conn
->rx_skb
);
1674 l2cap_unregister_all_users(conn
);
1676 mutex_lock(&conn
->chan_lock
);
1679 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1680 l2cap_chan_hold(chan
);
1681 l2cap_chan_lock(chan
);
1683 l2cap_chan_del(chan
, err
);
1685 l2cap_chan_unlock(chan
);
1687 chan
->ops
->close(chan
);
1688 l2cap_chan_put(chan
);
1691 mutex_unlock(&conn
->chan_lock
);
1693 hci_chan_del(conn
->hchan
);
1695 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1696 cancel_delayed_work_sync(&conn
->info_timer
);
1698 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1699 cancel_delayed_work_sync(&conn
->security_timer
);
1700 smp_chan_destroy(conn
);
1703 hcon
->l2cap_data
= NULL
;
1705 l2cap_conn_put(conn
);
1708 static void security_timeout(struct work_struct
*work
)
1710 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1711 security_timer
.work
);
1713 BT_DBG("conn %p", conn
);
1715 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1716 smp_chan_destroy(conn
);
1717 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1721 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
1723 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1724 struct hci_chan
*hchan
;
1729 hchan
= hci_chan_create(hcon
);
1733 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1735 hci_chan_del(hchan
);
1739 kref_init(&conn
->ref
);
1740 hcon
->l2cap_data
= conn
;
1742 hci_conn_get(conn
->hcon
);
1743 conn
->hchan
= hchan
;
1745 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1747 switch (hcon
->type
) {
1749 if (hcon
->hdev
->le_mtu
) {
1750 conn
->mtu
= hcon
->hdev
->le_mtu
;
1755 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1759 conn
->feat_mask
= 0;
1761 if (hcon
->type
== ACL_LINK
)
1762 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
1763 &hcon
->hdev
->dev_flags
);
1765 spin_lock_init(&conn
->lock
);
1766 mutex_init(&conn
->chan_lock
);
1768 INIT_LIST_HEAD(&conn
->chan_l
);
1769 INIT_LIST_HEAD(&conn
->users
);
1771 if (hcon
->type
== LE_LINK
)
1772 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1774 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1776 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1781 static void l2cap_conn_free(struct kref
*ref
)
1783 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1785 hci_conn_put(conn
->hcon
);
1789 void l2cap_conn_get(struct l2cap_conn
*conn
)
1791 kref_get(&conn
->ref
);
1793 EXPORT_SYMBOL(l2cap_conn_get
);
1795 void l2cap_conn_put(struct l2cap_conn
*conn
)
1797 kref_put(&conn
->ref
, l2cap_conn_free
);
1799 EXPORT_SYMBOL(l2cap_conn_put
);
1801 /* ---- Socket interface ---- */
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
1806 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1811 struct l2cap_chan
*c
, *c1
= NULL
;
1813 read_lock(&chan_list_lock
);
1815 list_for_each_entry(c
, &chan_list
, global_l
) {
1816 if (state
&& c
->state
!= state
)
1819 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1822 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1825 if (c
->psm
== psm
) {
1826 int src_match
, dst_match
;
1827 int src_any
, dst_any
;
1830 src_match
= !bacmp(&c
->src
, src
);
1831 dst_match
= !bacmp(&c
->dst
, dst
);
1832 if (src_match
&& dst_match
) {
1833 read_unlock(&chan_list_lock
);
1838 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1839 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1840 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1841 (src_any
&& dst_any
))
1846 read_unlock(&chan_list_lock
);
static bool is_valid_psm(u16 psm, u8 dst_type)
{
	if (!psm)
		return false;

	if (bdaddr_type_is_le(dst_type))
		return (psm <= 0x00ff);

	/* PSM must be odd and lsb of upper byte must be 0 */
	return ((psm & 0x0101) == 0x0001);
}
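/* Example: PSM 0x0001 (SDP) and a dynamic PSM such as 0x1001 pass the BR/EDR
 * check (value & 0x0101 == 0x0001), while an even PSM such as 0x0002, or one
 * with bit 8 set such as 0x0103, is rejected.
 */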
1863 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1864 bdaddr_t
*dst
, u8 dst_type
)
1866 struct l2cap_conn
*conn
;
1867 struct hci_conn
*hcon
;
1868 struct hci_dev
*hdev
;
1872 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
1873 dst_type
, __le16_to_cpu(psm
));
1875 hdev
= hci_get_route(dst
, &chan
->src
);
1877 return -EHOSTUNREACH
;
1881 l2cap_chan_lock(chan
);
1883 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
1884 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1889 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1894 switch (chan
->mode
) {
1895 case L2CAP_MODE_BASIC
:
1897 case L2CAP_MODE_LE_FLOWCTL
:
1898 l2cap_le_flowctl_init(chan
);
1900 case L2CAP_MODE_ERTM
:
1901 case L2CAP_MODE_STREAMING
:
1910 switch (chan
->state
) {
1914 /* Already connecting */
1919 /* Already connected */
1933 /* Set destination address and psm */
1934 bacpy(&chan
->dst
, dst
);
1935 chan
->dst_type
= dst_type
;
1940 auth_type
= l2cap_get_auth_type(chan
);
1942 if (bdaddr_type_is_le(dst_type
))
1943 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1944 chan
->sec_level
, auth_type
);
1946 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1947 chan
->sec_level
, auth_type
);
1950 err
= PTR_ERR(hcon
);
1954 conn
= l2cap_conn_add(hcon
);
1956 hci_conn_drop(hcon
);
1961 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
1962 hci_conn_drop(hcon
);
1967 /* Update source addr of the socket */
1968 bacpy(&chan
->src
, &hcon
->src
);
1969 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1971 l2cap_chan_unlock(chan
);
1972 l2cap_chan_add(conn
, chan
);
1973 l2cap_chan_lock(chan
);
1975 /* l2cap_chan_add takes its own ref so we can drop this one */
1976 hci_conn_drop(hcon
);
1978 l2cap_state_change(chan
, BT_CONNECT
);
1979 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
1981 if (hcon
->state
== BT_CONNECTED
) {
1982 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1983 __clear_chan_timer(chan
);
1984 if (l2cap_chan_check_security(chan
))
1985 l2cap_state_change(chan
, BT_CONNECTED
);
1987 l2cap_do_start(chan
);
1993 l2cap_chan_unlock(chan
);
1994 hci_dev_unlock(hdev
);
1999 static void l2cap_monitor_timeout(struct work_struct
*work
)
2001 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2002 monitor_timer
.work
);
2004 BT_DBG("chan %p", chan
);
2006 l2cap_chan_lock(chan
);
2009 l2cap_chan_unlock(chan
);
2010 l2cap_chan_put(chan
);
2014 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
2016 l2cap_chan_unlock(chan
);
2017 l2cap_chan_put(chan
);
2020 static void l2cap_retrans_timeout(struct work_struct
*work
)
2022 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2023 retrans_timer
.work
);
2025 BT_DBG("chan %p", chan
);
2027 l2cap_chan_lock(chan
);
2030 l2cap_chan_unlock(chan
);
2031 l2cap_chan_put(chan
);
2035 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
2036 l2cap_chan_unlock(chan
);
2037 l2cap_chan_put(chan
);
2040 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
2041 struct sk_buff_head
*skbs
)
2043 struct sk_buff
*skb
;
2044 struct l2cap_ctrl
*control
;
2046 BT_DBG("chan %p, skbs %p", chan
, skbs
);
2048 if (__chan_is_moving(chan
))
2051 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2053 while (!skb_queue_empty(&chan
->tx_q
)) {
2055 skb
= skb_dequeue(&chan
->tx_q
);
2057 bt_cb(skb
)->control
.retries
= 1;
2058 control
= &bt_cb(skb
)->control
;
2060 control
->reqseq
= 0;
2061 control
->txseq
= chan
->next_tx_seq
;
2063 __pack_control(chan
, control
, skb
);
2065 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2066 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
2067 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
2070 l2cap_do_send(chan
, skb
);
2072 BT_DBG("Sent txseq %u", control
->txseq
);
2074 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2075 chan
->frames_sent
++;
2079 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
2081 struct sk_buff
*skb
, *tx_skb
;
2082 struct l2cap_ctrl
*control
;
2085 BT_DBG("chan %p", chan
);
2087 if (chan
->state
!= BT_CONNECTED
)
2090 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2093 if (__chan_is_moving(chan
))
2096 while (chan
->tx_send_head
&&
2097 chan
->unacked_frames
< chan
->remote_tx_win
&&
2098 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
2100 skb
= chan
->tx_send_head
;
2102 bt_cb(skb
)->control
.retries
= 1;
2103 control
= &bt_cb(skb
)->control
;
2105 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2108 control
->reqseq
= chan
->buffer_seq
;
2109 chan
->last_acked_seq
= chan
->buffer_seq
;
2110 control
->txseq
= chan
->next_tx_seq
;
2112 __pack_control(chan
, control
, skb
);
2114 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2115 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
2116 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
		/* Clone after data has been modified. Data is assumed to be
		 * read-only (for locking purposes) on cloned sk_buffs.
		 */
2122 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2127 __set_retrans_timer(chan
);
2129 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2130 chan
->unacked_frames
++;
2131 chan
->frames_sent
++;
2134 if (skb_queue_is_last(&chan
->tx_q
, skb
))
2135 chan
->tx_send_head
= NULL
;
2137 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
2139 l2cap_do_send(chan
, tx_skb
);
2140 BT_DBG("Sent txseq %u", control
->txseq
);
2143 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
2144 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
2149 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
2151 struct l2cap_ctrl control
;
2152 struct sk_buff
*skb
;
2153 struct sk_buff
*tx_skb
;
2156 BT_DBG("chan %p", chan
);
2158 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2161 if (__chan_is_moving(chan
))
2164 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
2165 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
2167 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
2169 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2174 bt_cb(skb
)->control
.retries
++;
2175 control
= bt_cb(skb
)->control
;
2177 if (chan
->max_tx
!= 0 &&
2178 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
2179 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
2180 l2cap_send_disconn_req(chan
, ECONNRESET
);
2181 l2cap_seq_list_clear(&chan
->retrans_list
);
2185 control
.reqseq
= chan
->buffer_seq
;
2186 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2191 if (skb_cloned(skb
)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
2195 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
2197 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2201 l2cap_seq_list_clear(&chan
->retrans_list
);
2205 /* Update skb contents */
2206 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2207 put_unaligned_le32(__pack_extended_control(&control
),
2208 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2210 put_unaligned_le16(__pack_enhanced_control(&control
),
2211 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2214 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2215 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
2216 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2220 l2cap_do_send(chan
, tx_skb
);
2222 BT_DBG("Resent txseq %d", control
.txseq
);
2224 chan
->last_acked_seq
= chan
->buffer_seq
;
2228 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2229 struct l2cap_ctrl
*control
)
2231 BT_DBG("chan %p, control %p", chan
, control
);
2233 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2234 l2cap_ertm_resend(chan
);
2237 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2238 struct l2cap_ctrl
*control
)
2240 struct sk_buff
*skb
;
2242 BT_DBG("chan %p, control %p", chan
, control
);
2245 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2247 l2cap_seq_list_clear(&chan
->retrans_list
);
2249 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2252 if (chan
->unacked_frames
) {
2253 skb_queue_walk(&chan
->tx_q
, skb
) {
2254 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2255 skb
== chan
->tx_send_head
)
2259 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2260 if (skb
== chan
->tx_send_head
)
2263 l2cap_seq_list_append(&chan
->retrans_list
,
2264 bt_cb(skb
)->control
.txseq
);
2267 l2cap_ertm_resend(chan
);
2271 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2273 struct l2cap_ctrl control
;
2274 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2275 chan
->last_acked_seq
);
2278 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2279 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2281 memset(&control
, 0, sizeof(control
));
2284 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2285 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2286 __clear_ack_timer(chan
);
2287 control
.super
= L2CAP_SUPER_RNR
;
2288 control
.reqseq
= chan
->buffer_seq
;
2289 l2cap_send_sframe(chan
, &control
);
2291 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2292 l2cap_ertm_send(chan
);
2293 /* If any i-frames were sent, they included an ack */
2294 if (chan
->buffer_seq
== chan
->last_acked_seq
)
		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
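		/* Example: with ack_win = 63, threshold becomes
		 * 63 + (63 << 1) = 189 and is then divided by four to 47,
		 * i.e. roughly three quarters of the window.
		 */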
2301 threshold
= chan
->ack_win
;
2302 threshold
+= threshold
<< 1;
2305 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2308 if (frames_to_ack
>= threshold
) {
2309 __clear_ack_timer(chan
);
2310 control
.super
= L2CAP_SUPER_RR
;
2311 control
.reqseq
= chan
->buffer_seq
;
2312 l2cap_send_sframe(chan
, &control
);
2317 __set_ack_timer(chan
);
2321 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2322 struct msghdr
*msg
, int len
,
2323 int count
, struct sk_buff
*skb
)
2325 struct l2cap_conn
*conn
= chan
->conn
;
2326 struct sk_buff
**frag
;
2329 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2335 /* Continuation fragments (no L2CAP header) */
2336 frag
= &skb_shinfo(skb
)->frag_list
;
2338 struct sk_buff
*tmp
;
2340 count
= min_t(unsigned int, conn
->mtu
, len
);
2342 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2343 msg
->msg_flags
& MSG_DONTWAIT
);
2345 return PTR_ERR(tmp
);
2349 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2352 (*frag
)->priority
= skb
->priority
;
2357 skb
->len
+= (*frag
)->len
;
2358 skb
->data_len
+= (*frag
)->len
;
2360 frag
= &(*frag
)->next
;
2366 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2367 struct msghdr
*msg
, size_t len
,
2370 struct l2cap_conn
*conn
= chan
->conn
;
2371 struct sk_buff
*skb
;
2372 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2373 struct l2cap_hdr
*lh
;
2375 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan
,
2376 __le16_to_cpu(chan
->psm
), len
, priority
);
2378 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2380 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2381 msg
->msg_flags
& MSG_DONTWAIT
);
2385 skb
->priority
= priority
;
2387 /* Create L2CAP header */
2388 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2389 lh
->cid
= cpu_to_le16(chan
->dcid
);
2390 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2391 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2393 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2394 if (unlikely(err
< 0)) {
2396 return ERR_PTR(err
);
2401 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2402 struct msghdr
*msg
, size_t len
,
2405 struct l2cap_conn
*conn
= chan
->conn
;
2406 struct sk_buff
*skb
;
2408 struct l2cap_hdr
*lh
;
2410 BT_DBG("chan %p len %zu", chan
, len
);
2412 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2414 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2415 msg
->msg_flags
& MSG_DONTWAIT
);
2419 skb
->priority
= priority
;
2421 /* Create L2CAP header */
2422 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2423 lh
->cid
= cpu_to_le16(chan
->dcid
);
2424 lh
->len
= cpu_to_le16(len
);
2426 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2427 if (unlikely(err
< 0)) {
2429 return ERR_PTR(err
);
2434 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2435 struct msghdr
*msg
, size_t len
,
2438 struct l2cap_conn
*conn
= chan
->conn
;
2439 struct sk_buff
*skb
;
2440 int err
, count
, hlen
;
2441 struct l2cap_hdr
*lh
;
2443 BT_DBG("chan %p len %zu", chan
, len
);
2446 return ERR_PTR(-ENOTCONN
);
2448 hlen
= __ertm_hdr_size(chan
);
2451 hlen
+= L2CAP_SDULEN_SIZE
;
2453 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2454 hlen
+= L2CAP_FCS_SIZE
;
2456 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2458 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2459 msg
->msg_flags
& MSG_DONTWAIT
);
2463 /* Create L2CAP header */
2464 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2465 lh
->cid
= cpu_to_le16(chan
->dcid
);
2466 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2468 /* Control header is populated later */
2469 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2470 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2472 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2475 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2477 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2478 if (unlikely(err
< 0)) {
2480 return ERR_PTR(err
);
2483 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2484 bt_cb(skb
)->control
.retries
= 0;
2488 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2489 struct sk_buff_head
*seg_queue
,
2490 struct msghdr
*msg
, size_t len
)
2492 struct sk_buff
*skb
;
2497 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
2505 pdu_len
= chan
->conn
->mtu
;
2507 /* Constrain PDU size for BR/EDR connections */
2509 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2511 /* Adjust for largest possible L2CAP overhead. */
2513 pdu_len
-= L2CAP_FCS_SIZE
;
2515 pdu_len
-= __ertm_hdr_size(chan
);
2517 /* Remote device may have requested smaller PDUs */
2518 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2520 if (len
<= pdu_len
) {
2521 sar
= L2CAP_SAR_UNSEGMENTED
;
2525 sar
= L2CAP_SAR_START
;
2527 pdu_len
-= L2CAP_SDULEN_SIZE
;
2531 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2534 __skb_queue_purge(seg_queue
);
2535 return PTR_ERR(skb
);
2538 bt_cb(skb
)->control
.sar
= sar
;
2539 __skb_queue_tail(seg_queue
, skb
);
2544 pdu_len
+= L2CAP_SDULEN_SIZE
;
2547 if (len
<= pdu_len
) {
2548 sar
= L2CAP_SAR_END
;
2551 sar
= L2CAP_SAR_CONTINUE
;
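
/* Worked example of the PDU budget above (illustrative, with assumed
 * figures): on a BR/EDR link pdu_len starts at conn->mtu, is clamped to
 * L2CAP_BREDR_MAX_PAYLOAD, then reduced by L2CAP_FCS_SIZE (when FCS is
 * in use) and by __ertm_hdr_size(chan) (basic header plus the enhanced
 * or extended control field), and finally clamped to chan->remote_mps.
 * An SDU longer than the result is queued by the loop above as one
 * SAR_START PDU (which also carries the 2-byte SDU length), zero or
 * more SAR_CONTINUE PDUs and a final SAR_END PDU.
 */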
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
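
/* Illustrative sketch of the LE segmentation above (assumed figures,
 * not from the original source): every PDU is limited to
 * min(conn->mtu - L2CAP_HDR_SIZE, chan->remote_mps), and only the
 * first PDU of an SDU carries the 2-byte SDU length.  With an
 * effective PDU payload of 23 bytes, a 100-byte SDU would be queued
 * as PDUs carrying 21 + 23 + 23 + 23 + 10 bytes of SDU data, the
 * first one losing L2CAP_SDULEN_SIZE bytes to the SDU length field.
 */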
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;
		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
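
/* Caller-side sketch (hypothetical, for illustration only; the real
 * callers live in the socket and A2MP code):
 *
 *	err = l2cap_chan_send(chan, &msg, len, priority);
 *	if (err >= 0)
 *		;	// number of bytes accepted (== len)
 *	else
 *		;	// -EMSGSIZE, -EAGAIN (no LE credits), -ENOTCONN, ...
 *
 * The channel is expected to be locked by the caller, as it is around
 * the call in l2cap_sock_sendmsg().
 */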
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2865 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2866 struct l2cap_ctrl
*control
,
2867 struct sk_buff_head
*skbs
, u8 event
)
2869 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2873 case L2CAP_EV_DATA_REQUEST
:
2874 if (chan
->tx_send_head
== NULL
)
2875 chan
->tx_send_head
= skb_peek(skbs
);
2877 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2878 l2cap_ertm_send(chan
);
2880 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2881 BT_DBG("Enter LOCAL_BUSY");
2882 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2884 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2885 /* The SREJ_SENT state must be aborted if we are to
2886 * enter the LOCAL_BUSY state.
2888 l2cap_abort_rx_srej_sent(chan
);
2891 l2cap_send_ack(chan
);
2894 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2895 BT_DBG("Exit LOCAL_BUSY");
2896 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2898 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2899 struct l2cap_ctrl local_control
;
2901 memset(&local_control
, 0, sizeof(local_control
));
2902 local_control
.sframe
= 1;
2903 local_control
.super
= L2CAP_SUPER_RR
;
2904 local_control
.poll
= 1;
2905 local_control
.reqseq
= chan
->buffer_seq
;
2906 l2cap_send_sframe(chan
, &local_control
);
2908 chan
->retry_count
= 1;
2909 __set_monitor_timer(chan
);
2910 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2913 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2914 l2cap_process_reqseq(chan
, control
->reqseq
);
2916 case L2CAP_EV_EXPLICIT_POLL
:
2917 l2cap_send_rr_or_rnr(chan
, 1);
2918 chan
->retry_count
= 1;
2919 __set_monitor_timer(chan
);
2920 __clear_ack_timer(chan
);
2921 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2923 case L2CAP_EV_RETRANS_TO
:
2924 l2cap_send_rr_or_rnr(chan
, 1);
2925 chan
->retry_count
= 1;
2926 __set_monitor_timer(chan
);
2927 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2929 case L2CAP_EV_RECV_FBIT
:
2930 /* Nothing to process */
2937 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2938 struct l2cap_ctrl
*control
,
2939 struct sk_buff_head
*skbs
, u8 event
)
2941 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2945 case L2CAP_EV_DATA_REQUEST
:
2946 if (chan
->tx_send_head
== NULL
)
2947 chan
->tx_send_head
= skb_peek(skbs
);
2948 /* Queue data, but don't send. */
2949 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2951 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2952 BT_DBG("Enter LOCAL_BUSY");
2953 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2955 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2956 /* The SREJ_SENT state must be aborted if we are to
2957 * enter the LOCAL_BUSY state.
2959 l2cap_abort_rx_srej_sent(chan
);
2962 l2cap_send_ack(chan
);
2965 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2966 BT_DBG("Exit LOCAL_BUSY");
2967 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2969 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2970 struct l2cap_ctrl local_control
;
2971 memset(&local_control
, 0, sizeof(local_control
));
2972 local_control
.sframe
= 1;
2973 local_control
.super
= L2CAP_SUPER_RR
;
2974 local_control
.poll
= 1;
2975 local_control
.reqseq
= chan
->buffer_seq
;
2976 l2cap_send_sframe(chan
, &local_control
);
2978 chan
->retry_count
= 1;
2979 __set_monitor_timer(chan
);
2980 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2983 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2984 l2cap_process_reqseq(chan
, control
->reqseq
);
2988 case L2CAP_EV_RECV_FBIT
:
2989 if (control
&& control
->final
) {
2990 __clear_monitor_timer(chan
);
2991 if (chan
->unacked_frames
> 0)
2992 __set_retrans_timer(chan
);
2993 chan
->retry_count
= 0;
2994 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2995 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2998 case L2CAP_EV_EXPLICIT_POLL
:
3001 case L2CAP_EV_MONITOR_TO
:
3002 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
3003 l2cap_send_rr_or_rnr(chan
, 1);
3004 __set_monitor_timer(chan
);
3005 chan
->retry_count
++;
3007 l2cap_send_disconn_req(chan
, ECONNABORTED
);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* Ignore event */
		break;
	}
}

static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}

static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
			continue;

		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
3076 /* ---- L2CAP signalling commands ---- */
3077 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
3078 u8 ident
, u16 dlen
, void *data
)
3080 struct sk_buff
*skb
, **frag
;
3081 struct l2cap_cmd_hdr
*cmd
;
3082 struct l2cap_hdr
*lh
;
3085 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3086 conn
, code
, ident
, dlen
);
3088 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
3091 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
3092 count
= min_t(unsigned int, conn
->mtu
, len
);
3094 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
3098 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
3099 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
3101 if (conn
->hcon
->type
== LE_LINK
)
3102 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
3104 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
3106 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
3109 cmd
->len
= cpu_to_le16(dlen
);
3112 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
3113 memcpy(skb_put(skb
, count
), data
, count
);
3119 /* Continuation fragments (no L2CAP header) */
3120 frag
= &skb_shinfo(skb
)->frag_list
;
3122 count
= min_t(unsigned int, conn
->mtu
, len
);
3124 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
3128 memcpy(skb_put(*frag
, count
), data
, count
);
3133 frag
= &(*frag
)->next
;
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
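
/* Illustrative use of the option helpers above (a sketch, mirroring
 * how l2cap_build_conf_req() emits options further down): a caller
 * walks an output buffer with a moving pointer and appends TLVs, e.g.
 *
 *	void *ptr = req->data;
 *
 *	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
 *	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
 *			   (unsigned long) &rfc);
 *
 * For option lengths of 1, 2 or 4 the value is stored inline; anything
 * larger is treated as a pointer and copied with memcpy(), which is
 * why RFC and EFS structures are passed by address cast to
 * unsigned long.
 */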
3207 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
3209 struct l2cap_conf_efs efs
;
3211 switch (chan
->mode
) {
3212 case L2CAP_MODE_ERTM
:
3213 efs
.id
= chan
->local_id
;
3214 efs
.stype
= chan
->local_stype
;
3215 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3216 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3217 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
3218 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3221 case L2CAP_MODE_STREAMING
:
3223 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3224 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3225 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3234 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3235 (unsigned long) &efs
);
3238 static void l2cap_ack_timeout(struct work_struct
*work
)
3240 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3244 BT_DBG("chan %p", chan
);
3246 l2cap_chan_lock(chan
);
3248 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3249 chan
->last_acked_seq
);
3252 l2cap_send_rr_or_rnr(chan
, 0);
3254 l2cap_chan_unlock(chan
);
3255 l2cap_chan_put(chan
);
3258 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3262 chan
->next_tx_seq
= 0;
3263 chan
->expected_tx_seq
= 0;
3264 chan
->expected_ack_seq
= 0;
3265 chan
->unacked_frames
= 0;
3266 chan
->buffer_seq
= 0;
3267 chan
->frames_sent
= 0;
3268 chan
->last_acked_seq
= 0;
3270 chan
->sdu_last_frag
= NULL
;
3273 skb_queue_head_init(&chan
->tx_q
);
3275 chan
->local_amp_id
= AMP_ID_BREDR
;
3276 chan
->move_id
= AMP_ID_BREDR
;
3277 chan
->move_state
= L2CAP_MOVE_STABLE
;
3278 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3280 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3283 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3284 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3286 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3287 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3288 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3290 skb_queue_head_init(&chan
->srej_q
);
3292 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3296 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3298 l2cap_seq_list_free(&chan
->srej_list
);
3303 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3306 case L2CAP_MODE_STREAMING
:
3307 case L2CAP_MODE_ERTM
:
3308 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3312 return L2CAP_MODE_BASIC
;
3316 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3318 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3321 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3323 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
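
/* Worked example of the AMP branch above (illustrative numbers,
 * assuming the stored best-effort flush timeout is in microseconds,
 * as the division by 1000 suggests): a flush timeout of 10,000,000
 * becomes DIV_ROUND_UP_ULL(10000000, 1000) = 10000 ms, and the
 * recommended formula then gives 3 * 10000 + 500 = 30500 ms, well
 * under the 0xffff (65535 ms) cap imposed by the 16-bit RFC fields.
 */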
3364 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3366 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3367 __l2cap_ews_supported(chan
->conn
)) {
3368 /* use extended control field */
3369 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3370 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3372 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3373 L2CAP_DEFAULT_TX_WINDOW
);
3374 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3376 chan
->ack_win
= chan
->tx_win
;
3379 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3381 struct l2cap_conf_req
*req
= data
;
3382 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3383 void *ptr
= req
->data
;
3386 BT_DBG("chan %p", chan
);
3388 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3391 switch (chan
->mode
) {
3392 case L2CAP_MODE_STREAMING
:
3393 case L2CAP_MODE_ERTM
:
3394 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3397 if (__l2cap_efs_supported(chan
->conn
))
3398 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3402 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3407 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3408 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3410 switch (chan
->mode
) {
3411 case L2CAP_MODE_BASIC
:
3412 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3413 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3416 rfc
.mode
= L2CAP_MODE_BASIC
;
3418 rfc
.max_transmit
= 0;
3419 rfc
.retrans_timeout
= 0;
3420 rfc
.monitor_timeout
= 0;
3421 rfc
.max_pdu_size
= 0;
3423 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3424 (unsigned long) &rfc
);
3427 case L2CAP_MODE_ERTM
:
3428 rfc
.mode
= L2CAP_MODE_ERTM
;
3429 rfc
.max_transmit
= chan
->max_tx
;
3431 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3433 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3434 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3436 rfc
.max_pdu_size
= cpu_to_le16(size
);
3438 l2cap_txwin_setup(chan
);
3440 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3441 L2CAP_DEFAULT_TX_WINDOW
);
3443 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3444 (unsigned long) &rfc
);
3446 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3447 l2cap_add_opt_efs(&ptr
, chan
);
3449 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3450 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3453 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3454 if (chan
->fcs
== L2CAP_FCS_NONE
||
3455 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3456 chan
->fcs
= L2CAP_FCS_NONE
;
3457 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3462 case L2CAP_MODE_STREAMING
:
3463 l2cap_txwin_setup(chan
);
3464 rfc
.mode
= L2CAP_MODE_STREAMING
;
3466 rfc
.max_transmit
= 0;
3467 rfc
.retrans_timeout
= 0;
3468 rfc
.monitor_timeout
= 0;
3470 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3471 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3473 rfc
.max_pdu_size
= cpu_to_le16(size
);
3475 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3476 (unsigned long) &rfc
);
3478 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3479 l2cap_add_opt_efs(&ptr
, chan
);
3481 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3482 if (chan
->fcs
== L2CAP_FCS_NONE
||
3483 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3484 chan
->fcs
= L2CAP_FCS_NONE
;
3485 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3491 req
->dcid
= cpu_to_le16(chan
->dcid
);
3492 req
->flags
= __constant_cpu_to_le16(0);
3497 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
3499 struct l2cap_conf_rsp
*rsp
= data
;
3500 void *ptr
= rsp
->data
;
3501 void *req
= chan
->conf_req
;
3502 int len
= chan
->conf_len
;
3503 int type
, hint
, olen
;
3505 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3506 struct l2cap_conf_efs efs
;
3508 u16 mtu
= L2CAP_DEFAULT_MTU
;
3509 u16 result
= L2CAP_CONF_SUCCESS
;
3512 BT_DBG("chan %p", chan
);
3514 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3515 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3517 hint
= type
& L2CAP_CONF_HINT
;
3518 type
&= L2CAP_CONF_MASK
;
3521 case L2CAP_CONF_MTU
:
3525 case L2CAP_CONF_FLUSH_TO
:
3526 chan
->flush_to
= val
;
3529 case L2CAP_CONF_QOS
:
3532 case L2CAP_CONF_RFC
:
3533 if (olen
== sizeof(rfc
))
3534 memcpy(&rfc
, (void *) val
, olen
);
3537 case L2CAP_CONF_FCS
:
3538 if (val
== L2CAP_FCS_NONE
)
3539 set_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
);
3542 case L2CAP_CONF_EFS
:
3544 if (olen
== sizeof(efs
))
3545 memcpy(&efs
, (void *) val
, olen
);
3548 case L2CAP_CONF_EWS
:
3549 if (!chan
->conn
->hs_enabled
)
3550 return -ECONNREFUSED
;
3552 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3553 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3554 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3555 chan
->remote_tx_win
= val
;
3562 result
= L2CAP_CONF_UNKNOWN
;
3563 *((u8
*) ptr
++) = type
;
3568 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3571 switch (chan
->mode
) {
3572 case L2CAP_MODE_STREAMING
:
3573 case L2CAP_MODE_ERTM
:
3574 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3575 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3576 chan
->conn
->feat_mask
);
3581 if (__l2cap_efs_supported(chan
->conn
))
3582 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3584 return -ECONNREFUSED
;
3587 if (chan
->mode
!= rfc
.mode
)
3588 return -ECONNREFUSED
;
3594 if (chan
->mode
!= rfc
.mode
) {
3595 result
= L2CAP_CONF_UNACCEPT
;
3596 rfc
.mode
= chan
->mode
;
3598 if (chan
->num_conf_rsp
== 1)
3599 return -ECONNREFUSED
;
3601 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3602 (unsigned long) &rfc
);
3605 if (result
== L2CAP_CONF_SUCCESS
) {
3606 /* Configure output options and let the other side know
3607 * which ones we don't like. */
3609 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3610 result
= L2CAP_CONF_UNACCEPT
;
3613 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3615 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3618 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3619 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3620 efs
.stype
!= chan
->local_stype
) {
3622 result
= L2CAP_CONF_UNACCEPT
;
3624 if (chan
->num_conf_req
>= 1)
3625 return -ECONNREFUSED
;
3627 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3629 (unsigned long) &efs
);
3631 /* Send PENDING Conf Rsp */
3632 result
= L2CAP_CONF_PENDING
;
3633 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3638 case L2CAP_MODE_BASIC
:
3639 chan
->fcs
= L2CAP_FCS_NONE
;
3640 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3643 case L2CAP_MODE_ERTM
:
3644 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3645 chan
->remote_tx_win
= rfc
.txwin_size
;
3647 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3649 chan
->remote_max_tx
= rfc
.max_transmit
;
3651 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3652 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3653 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3654 rfc
.max_pdu_size
= cpu_to_le16(size
);
3655 chan
->remote_mps
= size
;
3657 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3659 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3661 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3662 sizeof(rfc
), (unsigned long) &rfc
);
3664 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3665 chan
->remote_id
= efs
.id
;
3666 chan
->remote_stype
= efs
.stype
;
3667 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3668 chan
->remote_flush_to
=
3669 le32_to_cpu(efs
.flush_to
);
3670 chan
->remote_acc_lat
=
3671 le32_to_cpu(efs
.acc_lat
);
3672 chan
->remote_sdu_itime
=
3673 le32_to_cpu(efs
.sdu_itime
);
3674 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3676 (unsigned long) &efs
);
3680 case L2CAP_MODE_STREAMING
:
3681 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3682 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3683 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3684 rfc
.max_pdu_size
= cpu_to_le16(size
);
3685 chan
->remote_mps
= size
;
3687 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3689 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3690 (unsigned long) &rfc
);
3695 result
= L2CAP_CONF_UNACCEPT
;
3697 memset(&rfc
, 0, sizeof(rfc
));
3698 rfc
.mode
= chan
->mode
;
3701 if (result
== L2CAP_CONF_SUCCESS
)
3702 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3704 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3705 rsp
->result
= cpu_to_le16(result
);
3706 rsp
->flags
= __constant_cpu_to_le16(0);
3711 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3712 void *data
, u16
*result
)
3714 struct l2cap_conf_req
*req
= data
;
3715 void *ptr
= req
->data
;
3718 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3719 struct l2cap_conf_efs efs
;
3721 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3723 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3724 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3727 case L2CAP_CONF_MTU
:
3728 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3729 *result
= L2CAP_CONF_UNACCEPT
;
3730 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3733 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3736 case L2CAP_CONF_FLUSH_TO
:
3737 chan
->flush_to
= val
;
3738 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3742 case L2CAP_CONF_RFC
:
3743 if (olen
== sizeof(rfc
))
3744 memcpy(&rfc
, (void *)val
, olen
);
3746 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3747 rfc
.mode
!= chan
->mode
)
3748 return -ECONNREFUSED
;
3752 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3753 sizeof(rfc
), (unsigned long) &rfc
);
3756 case L2CAP_CONF_EWS
:
3757 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3758 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3762 case L2CAP_CONF_EFS
:
3763 if (olen
== sizeof(efs
))
3764 memcpy(&efs
, (void *)val
, olen
);
3766 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3767 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3768 efs
.stype
!= chan
->local_stype
)
3769 return -ECONNREFUSED
;
3771 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3772 (unsigned long) &efs
);
3775 case L2CAP_CONF_FCS
:
3776 if (*result
== L2CAP_CONF_PENDING
)
3777 if (val
== L2CAP_FCS_NONE
)
3778 set_bit(CONF_RECV_NO_FCS
,
3784 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3785 return -ECONNREFUSED
;
3787 chan
->mode
= rfc
.mode
;
3789 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3791 case L2CAP_MODE_ERTM
:
3792 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3793 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3794 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3795 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3796 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3799 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3800 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3801 chan
->local_sdu_itime
=
3802 le32_to_cpu(efs
.sdu_itime
);
3803 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3804 chan
->local_flush_to
=
3805 le32_to_cpu(efs
.flush_to
);
3809 case L2CAP_MODE_STREAMING
:
3810 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3814 req
->dcid
= cpu_to_le16(chan
->dcid
);
3815 req
->flags
= __constant_cpu_to_le16(0);
3820 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3821 u16 result
, u16 flags
)
3823 struct l2cap_conf_rsp
*rsp
= data
;
3824 void *ptr
= rsp
->data
;
3826 BT_DBG("chan %p", chan
);
3828 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3829 rsp
->result
= cpu_to_le16(result
);
3830 rsp
->flags
= cpu_to_le16(flags
);
3835 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3837 struct l2cap_le_conn_rsp rsp
;
3838 struct l2cap_conn
*conn
= chan
->conn
;
3840 BT_DBG("chan %p", chan
);
3842 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3843 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3844 rsp
.mps
= cpu_to_le16(chan
->mps
);
3845 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3846 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3848 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3852 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3854 struct l2cap_conn_rsp rsp
;
3855 struct l2cap_conn
*conn
= chan
->conn
;
3859 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3860 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3861 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3862 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3865 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3867 rsp_code
= L2CAP_CONN_RSP
;
3869 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3871 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3873 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3876 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3877 l2cap_build_conf_req(chan
, buf
), buf
);
3878 chan
->num_conf_req
++;
3881 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3885 /* Use sane default values in case a misbehaving remote device
3886 * did not send an RFC or extended window size option.
3888 u16 txwin_ext
= chan
->ack_win
;
3889 struct l2cap_conf_rfc rfc
= {
3891 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3892 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3893 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3894 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3897 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3899 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3902 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3903 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3906 case L2CAP_CONF_RFC
:
3907 if (olen
== sizeof(rfc
))
3908 memcpy(&rfc
, (void *)val
, olen
);
3910 case L2CAP_CONF_EWS
:
3917 case L2CAP_MODE_ERTM
:
3918 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3919 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3920 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3921 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3922 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3924 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3927 case L2CAP_MODE_STREAMING
:
3928 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3932 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3933 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3936 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3938 if (cmd_len
< sizeof(*rej
))
3941 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3944 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3945 cmd
->ident
== conn
->info_ident
) {
3946 cancel_delayed_work(&conn
->info_timer
);
3948 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3949 conn
->info_ident
= 0;
3951 l2cap_conn_start(conn
);
3957 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3958 struct l2cap_cmd_hdr
*cmd
,
3959 u8
*data
, u8 rsp_code
, u8 amp_id
)
3961 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3962 struct l2cap_conn_rsp rsp
;
3963 struct l2cap_chan
*chan
= NULL
, *pchan
;
3964 int result
, status
= L2CAP_CS_NO_INFO
;
3966 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3967 __le16 psm
= req
->psm
;
3969 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3971 /* Check if we have socket listening on psm */
3972 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3973 &conn
->hcon
->dst
, ACL_LINK
);
3975 result
= L2CAP_CR_BAD_PSM
;
3979 mutex_lock(&conn
->chan_lock
);
3980 l2cap_chan_lock(pchan
);
3982 /* Check if the ACL is secure enough (if not SDP) */
3983 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3984 !hci_conn_check_link_mode(conn
->hcon
)) {
3985 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3986 result
= L2CAP_CR_SEC_BLOCK
;
3990 result
= L2CAP_CR_NO_MEM
;
3992 /* Check if we already have channel with that dcid */
3993 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3996 chan
= pchan
->ops
->new_connection(pchan
);
4000 /* For certain devices (ex: HID mouse), support for authentication,
4001 * pairing and bonding is optional. For such devices, inorder to avoid
4002 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4003 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4005 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
4007 bacpy(&chan
->src
, &conn
->hcon
->src
);
4008 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
4009 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
4010 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
4013 chan
->local_amp_id
= amp_id
;
4015 __l2cap_chan_add(conn
, chan
);
4019 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
4021 chan
->ident
= cmd
->ident
;
4023 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
4024 if (l2cap_chan_check_security(chan
)) {
4025 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
4026 l2cap_state_change(chan
, BT_CONNECT2
);
4027 result
= L2CAP_CR_PEND
;
4028 status
= L2CAP_CS_AUTHOR_PEND
;
4029 chan
->ops
->defer(chan
);
4031 /* Force pending result for AMP controllers.
4032 * The connection will succeed after the
4033 * physical link is up.
4035 if (amp_id
== AMP_ID_BREDR
) {
4036 l2cap_state_change(chan
, BT_CONFIG
);
4037 result
= L2CAP_CR_SUCCESS
;
4039 l2cap_state_change(chan
, BT_CONNECT2
);
4040 result
= L2CAP_CR_PEND
;
4042 status
= L2CAP_CS_NO_INFO
;
4045 l2cap_state_change(chan
, BT_CONNECT2
);
4046 result
= L2CAP_CR_PEND
;
4047 status
= L2CAP_CS_AUTHEN_PEND
;
4050 l2cap_state_change(chan
, BT_CONNECT2
);
4051 result
= L2CAP_CR_PEND
;
4052 status
= L2CAP_CS_NO_INFO
;
4056 l2cap_chan_unlock(pchan
);
4057 mutex_unlock(&conn
->chan_lock
);
4060 rsp
.scid
= cpu_to_le16(scid
);
4061 rsp
.dcid
= cpu_to_le16(dcid
);
4062 rsp
.result
= cpu_to_le16(result
);
4063 rsp
.status
= cpu_to_le16(status
);
4064 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
4066 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
4067 struct l2cap_info_req info
;
4068 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4070 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
4071 conn
->info_ident
= l2cap_get_ident(conn
);
4073 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
4075 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
4076 sizeof(info
), &info
);
4079 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
4080 result
== L2CAP_CR_SUCCESS
) {
4082 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4083 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4084 l2cap_build_conf_req(chan
, buf
), buf
);
4085 chan
->num_conf_req
++;
4091 static int l2cap_connect_req(struct l2cap_conn
*conn
,
4092 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4094 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
4095 struct hci_conn
*hcon
= conn
->hcon
;
4097 if (cmd_len
< sizeof(struct l2cap_conn_req
))
4101 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
4102 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
4103 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
4104 hcon
->dst_type
, 0, NULL
, 0,
4106 hci_dev_unlock(hdev
);
4108 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
4112 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
4113 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4116 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
4117 u16 scid
, dcid
, result
, status
;
4118 struct l2cap_chan
*chan
;
4122 if (cmd_len
< sizeof(*rsp
))
4125 scid
= __le16_to_cpu(rsp
->scid
);
4126 dcid
= __le16_to_cpu(rsp
->dcid
);
4127 result
= __le16_to_cpu(rsp
->result
);
4128 status
= __le16_to_cpu(rsp
->status
);
4130 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4131 dcid
, scid
, result
, status
);
4133 mutex_lock(&conn
->chan_lock
);
4136 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4142 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
4151 l2cap_chan_lock(chan
);
4154 case L2CAP_CR_SUCCESS
:
4155 l2cap_state_change(chan
, BT_CONFIG
);
4158 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4160 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
4163 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4164 l2cap_build_conf_req(chan
, req
), req
);
4165 chan
->num_conf_req
++;
4169 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4173 l2cap_chan_del(chan
, ECONNREFUSED
);
4177 l2cap_chan_unlock(chan
);
4180 mutex_unlock(&conn
->chan_lock
);
static inline void set_default_fcs(struct l2cap_chan *chan)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
		chan->fcs = L2CAP_FCS_NONE;
	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
		chan->fcs = L2CAP_FCS_CRC16;
}
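
/* Note (summarizing the behaviour above, not new policy): FCS defaults
 * to CRC16 on ERTM and streaming channels unless the peer explicitly
 * sent L2CAP_CONF_FCS with the value L2CAP_FCS_NONE during
 * configuration, which sets CONF_RECV_NO_FCS; basic-mode channels
 * never carry an FCS.
 */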
4196 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
4197 u8 ident
, u16 flags
)
4199 struct l2cap_conn
*conn
= chan
->conn
;
4201 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
4204 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
4205 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4207 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4208 l2cap_build_conf_rsp(chan
, data
,
4209 L2CAP_CONF_SUCCESS
, flags
), data
);
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
	rej.scid = __cpu_to_le16(scid);
	rej.dcid = __cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4224 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4225 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4228 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4231 struct l2cap_chan
*chan
;
4234 if (cmd_len
< sizeof(*req
))
4237 dcid
= __le16_to_cpu(req
->dcid
);
4238 flags
= __le16_to_cpu(req
->flags
);
4240 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4242 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4244 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4248 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4249 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4254 /* Reject if config buffer is too small. */
4255 len
= cmd_len
- sizeof(*req
);
4256 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4257 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4258 l2cap_build_conf_rsp(chan
, rsp
,
4259 L2CAP_CONF_REJECT
, flags
), rsp
);
4264 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4265 chan
->conf_len
+= len
;
4267 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4268 /* Incomplete config. Send empty response. */
4269 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4270 l2cap_build_conf_rsp(chan
, rsp
,
4271 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4275 /* Complete config. */
4276 len
= l2cap_parse_conf_req(chan
, rsp
);
4278 l2cap_send_disconn_req(chan
, ECONNRESET
);
4282 chan
->ident
= cmd
->ident
;
4283 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4284 chan
->num_conf_rsp
++;
4286 /* Reset config buffer. */
4289 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4292 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4293 set_default_fcs(chan
);
4295 if (chan
->mode
== L2CAP_MODE_ERTM
||
4296 chan
->mode
== L2CAP_MODE_STREAMING
)
4297 err
= l2cap_ertm_init(chan
);
4300 l2cap_send_disconn_req(chan
, -err
);
4302 l2cap_chan_ready(chan
);
4307 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4309 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4310 l2cap_build_conf_req(chan
, buf
), buf
);
4311 chan
->num_conf_req
++;
4314 /* Got Conf Rsp PENDING from remote side and asume we sent
4315 Conf Rsp PENDING in the code above */
4316 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4317 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4319 /* check compatibility */
4321 /* Send rsp for BR/EDR channel */
4323 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4325 chan
->ident
= cmd
->ident
;
4329 l2cap_chan_unlock(chan
);
4333 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4334 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4337 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4338 u16 scid
, flags
, result
;
4339 struct l2cap_chan
*chan
;
4340 int len
= cmd_len
- sizeof(*rsp
);
4343 if (cmd_len
< sizeof(*rsp
))
4346 scid
= __le16_to_cpu(rsp
->scid
);
4347 flags
= __le16_to_cpu(rsp
->flags
);
4348 result
= __le16_to_cpu(rsp
->result
);
4350 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4353 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4358 case L2CAP_CONF_SUCCESS
:
4359 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4360 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4363 case L2CAP_CONF_PENDING
:
4364 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4366 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4369 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4372 l2cap_send_disconn_req(chan
, ECONNRESET
);
4376 if (!chan
->hs_hcon
) {
4377 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4380 if (l2cap_check_efs(chan
)) {
4381 amp_create_logical_link(chan
);
4382 chan
->ident
= cmd
->ident
;
4388 case L2CAP_CONF_UNACCEPT
:
4389 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4392 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4393 l2cap_send_disconn_req(chan
, ECONNRESET
);
4397 /* throw out any old stored conf requests */
4398 result
= L2CAP_CONF_SUCCESS
;
4399 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4402 l2cap_send_disconn_req(chan
, ECONNRESET
);
4406 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4407 L2CAP_CONF_REQ
, len
, req
);
4408 chan
->num_conf_req
++;
4409 if (result
!= L2CAP_CONF_SUCCESS
)
4415 l2cap_chan_set_err(chan
, ECONNRESET
);
4417 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4418 l2cap_send_disconn_req(chan
, ECONNRESET
);
4422 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4425 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4427 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4428 set_default_fcs(chan
);
4430 if (chan
->mode
== L2CAP_MODE_ERTM
||
4431 chan
->mode
== L2CAP_MODE_STREAMING
)
4432 err
= l2cap_ertm_init(chan
);
4435 l2cap_send_disconn_req(chan
, -err
);
4437 l2cap_chan_ready(chan
);
4441 l2cap_chan_unlock(chan
);
4445 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4446 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4449 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4450 struct l2cap_disconn_rsp rsp
;
4452 struct l2cap_chan
*chan
;
4454 if (cmd_len
!= sizeof(*req
))
4457 scid
= __le16_to_cpu(req
->scid
);
4458 dcid
= __le16_to_cpu(req
->dcid
);
4460 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4462 mutex_lock(&conn
->chan_lock
);
4464 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4466 mutex_unlock(&conn
->chan_lock
);
4467 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4471 l2cap_chan_lock(chan
);
4473 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4474 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4475 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4477 chan
->ops
->set_shutdown(chan
);
4479 l2cap_chan_hold(chan
);
4480 l2cap_chan_del(chan
, ECONNRESET
);
4482 l2cap_chan_unlock(chan
);
4484 chan
->ops
->close(chan
);
4485 l2cap_chan_put(chan
);
4487 mutex_unlock(&conn
->chan_lock
);
4492 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4493 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4496 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4498 struct l2cap_chan
*chan
;
4500 if (cmd_len
!= sizeof(*rsp
))
4503 scid
= __le16_to_cpu(rsp
->scid
);
4504 dcid
= __le16_to_cpu(rsp
->dcid
);
4506 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4508 mutex_lock(&conn
->chan_lock
);
4510 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4512 mutex_unlock(&conn
->chan_lock
);
4516 l2cap_chan_lock(chan
);
4518 l2cap_chan_hold(chan
);
4519 l2cap_chan_del(chan
, 0);
4521 l2cap_chan_unlock(chan
);
4523 chan
->ops
->close(chan
);
4524 l2cap_chan_put(chan
);
4526 mutex_unlock(&conn
->chan_lock
);
4531 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4532 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4535 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4538 if (cmd_len
!= sizeof(*req
))
4541 type
= __le16_to_cpu(req
->type
);
4543 BT_DBG("type 0x%4.4x", type
);
4545 if (type
== L2CAP_IT_FEAT_MASK
) {
4547 u32 feat_mask
= l2cap_feat_mask
;
4548 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4549 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4550 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4552 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4554 if (conn
->hs_enabled
)
4555 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4556 | L2CAP_FEAT_EXT_WINDOW
;
4558 put_unaligned_le32(feat_mask
, rsp
->data
);
4559 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4561 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4563 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4565 if (conn
->hs_enabled
)
4566 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4568 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4570 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4571 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4572 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4573 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4576 struct l2cap_info_rsp rsp
;
4577 rsp
.type
= cpu_to_le16(type
);
4578 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4579 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4586 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4587 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4590 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4593 if (cmd_len
< sizeof(*rsp
))
4596 type
= __le16_to_cpu(rsp
->type
);
4597 result
= __le16_to_cpu(rsp
->result
);
4599 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4601 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4602 if (cmd
->ident
!= conn
->info_ident
||
4603 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4606 cancel_delayed_work(&conn
->info_timer
);
4608 if (result
!= L2CAP_IR_SUCCESS
) {
4609 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4610 conn
->info_ident
= 0;
4612 l2cap_conn_start(conn
);
4618 case L2CAP_IT_FEAT_MASK
:
4619 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4621 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4622 struct l2cap_info_req req
;
4623 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4625 conn
->info_ident
= l2cap_get_ident(conn
);
4627 l2cap_send_cmd(conn
, conn
->info_ident
,
4628 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4630 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4631 conn
->info_ident
= 0;
4633 l2cap_conn_start(conn
);
4637 case L2CAP_IT_FIXED_CHAN
:
4638 conn
->fixed_chan_mask
= rsp
->data
[0];
4639 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4640 conn
->info_ident
= 0;
4642 l2cap_conn_start(conn
);
4649 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4650 struct l2cap_cmd_hdr
*cmd
,
4651 u16 cmd_len
, void *data
)
4653 struct l2cap_create_chan_req
*req
= data
;
4654 struct l2cap_create_chan_rsp rsp
;
4655 struct l2cap_chan
*chan
;
4656 struct hci_dev
*hdev
;
4659 if (cmd_len
!= sizeof(*req
))
4662 if (!conn
->hs_enabled
)
4665 psm
= le16_to_cpu(req
->psm
);
4666 scid
= le16_to_cpu(req
->scid
);
4668 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4670 /* For controller id 0 make BR/EDR connection */
4671 if (req
->amp_id
== AMP_ID_BREDR
) {
4672 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4677 /* Validate AMP controller id */
4678 hdev
= hci_dev_get(req
->amp_id
);
4682 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4687 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4690 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4691 struct hci_conn
*hs_hcon
;
4693 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4697 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4702 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4704 mgr
->bredr_chan
= chan
;
4705 chan
->hs_hcon
= hs_hcon
;
4706 chan
->fcs
= L2CAP_FCS_NONE
;
4707 conn
->mtu
= hdev
->block_mtu
;
4716 rsp
.scid
= cpu_to_le16(scid
);
4717 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4718 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4720 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4726 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4728 struct l2cap_move_chan_req req
;
4731 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4733 ident
= l2cap_get_ident(chan
->conn
);
4734 chan
->ident
= ident
;
4736 req
.icid
= cpu_to_le16(chan
->scid
);
4737 req
.dest_amp_id
= dest_amp_id
;
4739 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4742 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4745 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4747 struct l2cap_move_chan_rsp rsp
;
4749 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4751 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4752 rsp
.result
= cpu_to_le16(result
);
4754 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4758 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4760 struct l2cap_move_chan_cfm cfm
;
4762 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4764 chan
->ident
= l2cap_get_ident(chan
->conn
);
4766 cfm
.icid
= cpu_to_le16(chan
->scid
);
4767 cfm
.result
= cpu_to_le16(result
);
4769 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4772 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4775 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4777 struct l2cap_move_chan_cfm cfm
;
4779 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4781 cfm
.icid
= cpu_to_le16(icid
);
4782 cfm
.result
= __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4784 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4788 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4791 struct l2cap_move_chan_cfm_rsp rsp
;
4793 BT_DBG("icid 0x%4.4x", icid
);
4795 rsp
.icid
= cpu_to_le16(icid
);
4796 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4799 static void __release_logical_link(struct l2cap_chan
*chan
)
4801 chan
->hs_hchan
= NULL
;
4802 chan
->hs_hcon
= NULL
;
4804 /* Placeholder - release the logical link */
4807 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4809 /* Logical link setup failed */
4810 if (chan
->state
!= BT_CONNECTED
) {
4811 /* Create channel failure, disconnect */
4812 l2cap_send_disconn_req(chan
, ECONNRESET
);
4816 switch (chan
->move_role
) {
4817 case L2CAP_MOVE_ROLE_RESPONDER
:
4818 l2cap_move_done(chan
);
4819 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4821 case L2CAP_MOVE_ROLE_INITIATOR
:
4822 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4823 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4824 /* Remote has only sent pending or
4825 * success responses, clean up
4827 l2cap_move_done(chan
);
4830 /* Other amp move states imply that the move
4831 * has already aborted
4833 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4838 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4839 struct hci_chan
*hchan
)
4841 struct l2cap_conf_rsp rsp
;
4843 chan
->hs_hchan
= hchan
;
4844 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4846 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4848 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4851 set_default_fcs(chan
);
4853 err
= l2cap_ertm_init(chan
);
4855 l2cap_send_disconn_req(chan
, -err
);
4857 l2cap_chan_ready(chan
);
4861 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4862 struct hci_chan
*hchan
)
4864 chan
->hs_hcon
= hchan
->conn
;
4865 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4867 BT_DBG("move_state %d", chan
->move_state
);
4869 switch (chan
->move_state
) {
4870 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4871 /* Move confirm will be sent after a success
4872 * response is received
4874 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4876 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4877 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4878 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4879 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4880 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4881 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4882 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4883 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4884 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4888 /* Move was not in expected state, free the channel */
4889 __release_logical_link(chan
);
4891 chan
->move_state
= L2CAP_MOVE_STABLE
;
4895 /* Call with chan locked */
4896 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4899 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4902 l2cap_logical_fail(chan
);
4903 __release_logical_link(chan
);
4907 if (chan
->state
!= BT_CONNECTED
) {
4908 /* Ignore logical link if channel is on BR/EDR */
4909 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4910 l2cap_logical_finish_create(chan
, hchan
);
4912 l2cap_logical_finish_move(chan
, hchan
);
4916 void l2cap_move_start(struct l2cap_chan
*chan
)
4918 BT_DBG("chan %p", chan
);
4920 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4921 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4923 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4924 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4925 /* Placeholder - start physical link setup */
4927 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4928 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4930 l2cap_move_setup(chan
);
4931 l2cap_send_move_chan_req(chan
, 0);
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		u8 buf[128];

		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
{
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		u8 rsp_result;

		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
		else
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);
	}

	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
}
/* Invoke with locked chan */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5075 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
5076 struct l2cap_cmd_hdr
*cmd
,
5077 u16 cmd_len
, void *data
)
5079 struct l2cap_move_chan_req
*req
= data
;
5080 struct l2cap_move_chan_rsp rsp
;
5081 struct l2cap_chan
*chan
;
5083 u16 result
= L2CAP_MR_NOT_ALLOWED
;
5085 if (cmd_len
!= sizeof(*req
))
5088 icid
= le16_to_cpu(req
->icid
);
5090 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
5092 if (!conn
->hs_enabled
)
5095 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5097 rsp
.icid
= cpu_to_le16(icid
);
5098 rsp
.result
= __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
5099 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
5104 chan
->ident
= cmd
->ident
;
5106 if (chan
->scid
< L2CAP_CID_DYN_START
||
5107 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
5108 (chan
->mode
!= L2CAP_MODE_ERTM
&&
5109 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
5110 result
= L2CAP_MR_NOT_ALLOWED
;
5111 goto send_move_response
;
5114 if (chan
->local_amp_id
== req
->dest_amp_id
) {
5115 result
= L2CAP_MR_SAME_ID
;
5116 goto send_move_response
;
5119 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
5120 struct hci_dev
*hdev
;
5121 hdev
= hci_dev_get(req
->dest_amp_id
);
5122 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
5123 !test_bit(HCI_UP
, &hdev
->flags
)) {
5127 result
= L2CAP_MR_BAD_ID
;
5128 goto send_move_response
;
5133 /* Detect a move collision. Only send a collision response
5134 * if this side has "lost", otherwise proceed with the move.
5135 * The winner has the larger bd_addr.
5137 if ((__chan_is_moving(chan
) ||
5138 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
5139 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
5140 result
= L2CAP_MR_COLLISION
;
5141 goto send_move_response
;
5144 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5145 l2cap_move_setup(chan
);
5146 chan
->move_id
= req
->dest_amp_id
;
5149 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
5150 /* Moving to BR/EDR */
5151 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5152 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5153 result
= L2CAP_MR_PEND
;
5155 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
5156 result
= L2CAP_MR_SUCCESS
;
5159 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
5160 /* Placeholder - uncomment when amp functions are available */
5161 /*amp_accept_physical(chan, req->dest_amp_id);*/
5162 result
= L2CAP_MR_PEND
;
5166 l2cap_send_move_chan_rsp(chan
, result
);
5168 l2cap_chan_unlock(chan
);
5173 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
5175 struct l2cap_chan
*chan
;
5176 struct hci_chan
*hchan
= NULL
;
5178 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5180 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5184 __clear_chan_timer(chan
);
5185 if (result
== L2CAP_MR_PEND
)
5186 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
5188 switch (chan
->move_state
) {
5189 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
5190 /* Move confirm will be sent when logical link
5193 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5195 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
5196 if (result
== L2CAP_MR_PEND
) {
5198 } else if (test_bit(CONN_LOCAL_BUSY
,
5199 &chan
->conn_state
)) {
5200 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5202 /* Logical link is up or moving to BR/EDR,
5205 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
5206 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5209 case L2CAP_MOVE_WAIT_RSP
:
5211 if (result
== L2CAP_MR_SUCCESS
) {
5212 /* Remote is ready, send confirm immediately
5213 * after logical link is ready
5215 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5217 /* Both logical link and move success
5218 * are required to confirm
5220 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5223 /* Placeholder - get hci_chan for logical link */
5225 /* Logical link not available */
5226 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5230 /* If the logical link is not yet connected, do not
5231 * send confirmation.
5233 if (hchan
->state
!= BT_CONNECTED
)
5236 /* Logical link is already ready to go */
5238 chan
->hs_hcon
= hchan
->conn
;
5239 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5241 if (result
== L2CAP_MR_SUCCESS
) {
5242 /* Can confirm now */
5243 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5245 /* Now only need move success
5248 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5251 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5254 /* Any other amp move state means the move failed. */
5255 chan
->move_id
= chan
->local_amp_id
;
5256 l2cap_move_done(chan
);
5257 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5260 l2cap_chan_unlock(chan
);
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
		l2cap_move_continue(conn, icid, result);
	else
		l2cap_move_fail(conn, cmd->ident, icid, result);

	return 0;
}
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
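
/* Worked example for the check above (illustrative numbers only): a
 * request with max interval 24 (24 * 1.25 ms = 30 ms) and timeout
 * multiplier 400 (400 * 10 ms = 4 s) gives
 * max_latency = (400 * 8 / 24) - 1 = 132, so any requested slave
 * latency above 132 (or above the absolute cap of 499) is rejected.
 */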
5413 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5414 struct l2cap_cmd_hdr
*cmd
,
5415 u16 cmd_len
, u8
*data
)
5417 struct hci_conn
*hcon
= conn
->hcon
;
5418 struct l2cap_conn_param_update_req
*req
;
5419 struct l2cap_conn_param_update_rsp rsp
;
5420 u16 min
, max
, latency
, to_multiplier
;
5423 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
5426 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5429 req
= (struct l2cap_conn_param_update_req
*) data
;
5430 min
= __le16_to_cpu(req
->min
);
5431 max
= __le16_to_cpu(req
->max
);
5432 latency
= __le16_to_cpu(req
->latency
);
5433 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5435 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5436 min
, max
, latency
, to_multiplier
);
5438 memset(&rsp
, 0, sizeof(rsp
));
5440 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
5442 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5444 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5446 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5450 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
5455 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5456 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5459 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5460 u16 dcid
, mtu
, mps
, credits
, result
;
5461 struct l2cap_chan
*chan
;
5464 if (cmd_len
< sizeof(*rsp
))
5467 dcid
= __le16_to_cpu(rsp
->dcid
);
5468 mtu
= __le16_to_cpu(rsp
->mtu
);
5469 mps
= __le16_to_cpu(rsp
->mps
);
5470 credits
= __le16_to_cpu(rsp
->credits
);
5471 result
= __le16_to_cpu(rsp
->result
);
5473 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5476 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5477 dcid
, mtu
, mps
, credits
, result
);
5479 mutex_lock(&conn
->chan_lock
);
5481 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5489 l2cap_chan_lock(chan
);
5492 case L2CAP_CR_SUCCESS
:
5496 chan
->remote_mps
= mps
;
5497 chan
->tx_credits
= credits
;
5498 l2cap_chan_ready(chan
);
5502 l2cap_chan_del(chan
, ECONNREFUSED
);
5506 l2cap_chan_unlock(chan
);
5509 mutex_unlock(&conn
->chan_lock
);
5514 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5515 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5520 switch (cmd
->code
) {
5521 case L2CAP_COMMAND_REJ
:
5522 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5525 case L2CAP_CONN_REQ
:
5526 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5529 case L2CAP_CONN_RSP
:
5530 case L2CAP_CREATE_CHAN_RSP
:
5531 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5534 case L2CAP_CONF_REQ
:
5535 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5538 case L2CAP_CONF_RSP
:
5539 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5542 case L2CAP_DISCONN_REQ
:
5543 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5546 case L2CAP_DISCONN_RSP
:
5547 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5550 case L2CAP_ECHO_REQ
:
5551 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5554 case L2CAP_ECHO_RSP
:
5557 case L2CAP_INFO_REQ
:
5558 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5561 case L2CAP_INFO_RSP
:
5562 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5565 case L2CAP_CREATE_CHAN_REQ
:
5566 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5569 case L2CAP_MOVE_CHAN_REQ
:
5570 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5573 case L2CAP_MOVE_CHAN_RSP
:
5574 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5577 case L2CAP_MOVE_CHAN_CFM
:
5578 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5581 case L2CAP_MOVE_CHAN_CFM_RSP
:
5582 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5586 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5594 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5595 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5598 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5599 struct l2cap_le_conn_rsp rsp
;
5600 struct l2cap_chan
*chan
, *pchan
;
5601 u16 dcid
, scid
, credits
, mtu
, mps
;
5605 if (cmd_len
!= sizeof(*req
))
5608 scid
= __le16_to_cpu(req
->scid
);
5609 mtu
= __le16_to_cpu(req
->mtu
);
5610 mps
= __le16_to_cpu(req
->mps
);
5615 if (mtu
< 23 || mps
< 23)
5618 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5621 /* Check if we have socket listening on psm */
5622 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5623 &conn
->hcon
->dst
, LE_LINK
);
5625 result
= L2CAP_CR_BAD_PSM
;
5630 mutex_lock(&conn
->chan_lock
);
5631 l2cap_chan_lock(pchan
);
5633 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
)) {
5634 result
= L2CAP_CR_AUTHENTICATION
;
5636 goto response_unlock
;
5639 /* Check if we already have channel with that dcid */
5640 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5641 result
= L2CAP_CR_NO_MEM
;
5643 goto response_unlock
;
5646 chan
= pchan
->ops
->new_connection(pchan
);
5648 result
= L2CAP_CR_NO_MEM
;
5649 goto response_unlock
;
5652 l2cap_le_flowctl_init(chan
);
5654 bacpy(&chan
->src
, &conn
->hcon
->src
);
5655 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5656 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5657 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5661 chan
->remote_mps
= mps
;
5662 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5664 __l2cap_chan_add(conn
, chan
);
5666 credits
= chan
->rx_credits
;
5668 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5670 chan
->ident
= cmd
->ident
;
5672 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5673 l2cap_state_change(chan
, BT_CONNECT2
);
5674 result
= L2CAP_CR_PEND
;
5675 chan
->ops
->defer(chan
);
5677 l2cap_chan_ready(chan
);
5678 result
= L2CAP_CR_SUCCESS
;
5682 l2cap_chan_unlock(pchan
);
5683 mutex_unlock(&conn
->chan_lock
);
5685 if (result
== L2CAP_CR_PEND
)
5690 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5691 rsp
.mps
= cpu_to_le16(chan
->mps
);
5697 rsp
.dcid
= cpu_to_le16(dcid
);
5698 rsp
.credits
= cpu_to_le16(credits
);
5699 rsp
.result
= cpu_to_le16(result
);
5701 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	chan->tx_credits += credits;

	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
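
/* Example of the credit handling above (illustrative): if the peer
 * grants 5 credits while 8 frames sit in tx_q, only 5 frames are sent
 * and 5 credits consumed; the rest wait for a further LE Flow Control
 * Credit packet. If credits remain once the queue is drained, the
 * resume callback tells the channel owner it may queue more data.
 */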
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);

	return 0;
}
5767 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5768 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5773 if (!enable_lecoc
) {
5774 switch (cmd
->code
) {
5775 case L2CAP_LE_CONN_REQ
:
5776 case L2CAP_LE_CONN_RSP
:
5777 case L2CAP_LE_CREDITS
:
5778 case L2CAP_DISCONN_REQ
:
5779 case L2CAP_DISCONN_RSP
:
5784 switch (cmd
->code
) {
5785 case L2CAP_COMMAND_REJ
:
5786 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5789 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5790 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5793 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5796 case L2CAP_LE_CONN_RSP
:
5797 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5800 case L2CAP_LE_CONN_REQ
:
5801 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5804 case L2CAP_LE_CREDITS
:
5805 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5808 case L2CAP_DISCONN_REQ
:
5809 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5812 case L2CAP_DISCONN_RSP
:
5813 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5817 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5866 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5867 struct sk_buff
*skb
)
5869 struct hci_conn
*hcon
= conn
->hcon
;
5870 u8
*data
= skb
->data
;
5872 struct l2cap_cmd_hdr cmd
;
5875 l2cap_raw_recv(conn
, skb
);
5877 if (hcon
->type
!= ACL_LINK
)
5880 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5882 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5883 data
+= L2CAP_CMD_HDR_SIZE
;
5884 len
-= L2CAP_CMD_HDR_SIZE
;
5886 cmd_len
= le16_to_cpu(cmd
.len
);
5888 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5891 if (cmd_len
> len
|| !cmd
.ident
) {
5892 BT_DBG("corrupted command");
5896 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5898 struct l2cap_cmd_rej_unk rej
;
5900 BT_ERR("Wrong link type (%d)", err
);
5902 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5903 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
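
/* Informal note on the coverage above: the CRC-16 starts hdr_size bytes
 * before the current payload pointer, so it spans the Basic L2CAP
 * header, the (enhanced or extended) control field and the information
 * payload - everything in the PDU except the FCS itself, which
 * skb_trim() has already removed from skb->len.
 */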
5936 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5938 struct l2cap_ctrl control
;
5940 BT_DBG("chan %p", chan
);
5942 memset(&control
, 0, sizeof(control
));
5945 control
.reqseq
= chan
->buffer_seq
;
5946 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5948 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5949 control
.super
= L2CAP_SUPER_RNR
;
5950 l2cap_send_sframe(chan
, &control
);
5953 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5954 chan
->unacked_frames
> 0)
5955 __set_retrans_timer(chan
);
5957 /* Send pending iframes */
5958 l2cap_ertm_send(chan
);
5960 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5961 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5962 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5965 control
.super
= L2CAP_SUPER_RR
;
5966 l2cap_send_sframe(chan
, &control
);
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5989 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5990 struct l2cap_ctrl
*control
)
5994 switch (control
->sar
) {
5995 case L2CAP_SAR_UNSEGMENTED
:
5999 err
= chan
->ops
->recv(chan
, skb
);
6002 case L2CAP_SAR_START
:
6006 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
6007 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6009 if (chan
->sdu_len
> chan
->imtu
) {
6014 if (skb
->len
>= chan
->sdu_len
)
6018 chan
->sdu_last_frag
= skb
;
6024 case L2CAP_SAR_CONTINUE
:
6028 append_skb_frag(chan
->sdu
, skb
,
6029 &chan
->sdu_last_frag
);
6032 if (chan
->sdu
->len
>= chan
->sdu_len
)
6042 append_skb_frag(chan
->sdu
, skb
,
6043 &chan
->sdu_last_frag
);
6046 if (chan
->sdu
->len
!= chan
->sdu_len
)
6049 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6052 /* Reassembly complete */
6054 chan
->sdu_last_frag
= NULL
;
6062 kfree_skb(chan
->sdu
);
6064 chan
->sdu_last_frag
= NULL
;
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}

void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}
6088 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
6091 /* Pass sequential frames to l2cap_reassemble_sdu()
6092 * until a gap is encountered.
6095 BT_DBG("chan %p", chan
);
6097 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6098 struct sk_buff
*skb
;
6099 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6100 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
6102 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
6107 skb_unlink(skb
, &chan
->srej_q
);
6108 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6109 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
6114 if (skb_queue_empty(&chan
->srej_q
)) {
6115 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6116 l2cap_send_ack(chan
);
6122 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
6123 struct l2cap_ctrl
*control
)
6125 struct sk_buff
*skb
;
6127 BT_DBG("chan %p, control %p", chan
, control
);
6129 if (control
->reqseq
== chan
->next_tx_seq
) {
6130 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
6131 l2cap_send_disconn_req(chan
, ECONNRESET
);
6135 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
6138 BT_DBG("Seq %d not available for retransmission",
6143 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
6144 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
6145 l2cap_send_disconn_req(chan
, ECONNRESET
);
6149 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6151 if (control
->poll
) {
6152 l2cap_pass_to_tx(chan
, control
);
6154 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6155 l2cap_retransmit(chan
, control
);
6156 l2cap_ertm_send(chan
);
6158 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
6159 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6160 chan
->srej_save_reqseq
= control
->reqseq
;
6163 l2cap_pass_to_tx_fbit(chan
, control
);
6165 if (control
->final
) {
6166 if (chan
->srej_save_reqseq
!= control
->reqseq
||
6167 !test_and_clear_bit(CONN_SREJ_ACT
,
6169 l2cap_retransmit(chan
, control
);
6171 l2cap_retransmit(chan
, control
);
6172 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
6173 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6174 chan
->srej_save_reqseq
= control
->reqseq
;
6180 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
6181 struct l2cap_ctrl
*control
)
6183 struct sk_buff
*skb
;
6185 BT_DBG("chan %p, control %p", chan
, control
);
6187 if (control
->reqseq
== chan
->next_tx_seq
) {
6188 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
6189 l2cap_send_disconn_req(chan
, ECONNRESET
);
6193 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
6195 if (chan
->max_tx
&& skb
&&
6196 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
6197 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
6198 l2cap_send_disconn_req(chan
, ECONNRESET
);
6202 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6204 l2cap_pass_to_tx(chan
, control
);
6206 if (control
->final
) {
6207 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
6208 l2cap_retransmit_all(chan
, control
);
6210 l2cap_retransmit_all(chan
, control
);
6211 l2cap_ertm_send(chan
);
6212 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
6213 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
6217 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
6219 BT_DBG("chan %p, txseq %d", chan
, txseq
);
6221 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
6222 chan
->expected_tx_seq
);
6224 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
6225 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6227 /* See notes below regarding "double poll" and
6230 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6231 BT_DBG("Invalid/Ignore - after SREJ");
6232 return L2CAP_TXSEQ_INVALID_IGNORE
;
6234 BT_DBG("Invalid - in window after SREJ sent");
6235 return L2CAP_TXSEQ_INVALID
;
6239 if (chan
->srej_list
.head
== txseq
) {
6240 BT_DBG("Expected SREJ");
6241 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6244 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6245 BT_DBG("Duplicate SREJ - txseq already stored");
6246 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6249 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6250 BT_DBG("Unexpected SREJ - not requested");
6251 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6255 if (chan
->expected_tx_seq
== txseq
) {
6256 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6258 BT_DBG("Invalid - txseq outside tx window");
6259 return L2CAP_TXSEQ_INVALID
;
6262 return L2CAP_TXSEQ_EXPECTED
;
6266 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6267 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6268 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6269 return L2CAP_TXSEQ_DUPLICATE
;
6272 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6273 /* A source of invalid packets is a "double poll" condition,
6274 * where delays cause us to send multiple poll packets. If
6275 * the remote stack receives and processes both polls,
6276 * sequence numbers can wrap around in such a way that a
6277 * resent frame has a sequence number that looks like new data
6278 * with a sequence gap. This would trigger an erroneous SREJ
6281 * Fortunately, this is impossible with a tx window that's
6282 * less than half of the maximum sequence number, which allows
6283 * invalid frames to be safely ignored.
6285 * With tx window sizes greater than half of the tx window
6286 * maximum, the frame is invalid and cannot be ignored. This
6287 * causes a disconnect.
6290 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6291 BT_DBG("Invalid/Ignore - txseq outside tx window");
6292 return L2CAP_TXSEQ_INVALID_IGNORE
;
6294 BT_DBG("Invalid - txseq outside tx window");
6295 return L2CAP_TXSEQ_INVALID
;
6298 BT_DBG("Unexpected - txseq indicates missing frames");
6299 return L2CAP_TXSEQ_UNEXPECTED
;
6303 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6304 struct l2cap_ctrl
*control
,
6305 struct sk_buff
*skb
, u8 event
)
6308 bool skb_in_use
= false;
6310 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6314 case L2CAP_EV_RECV_IFRAME
:
6315 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6316 case L2CAP_TXSEQ_EXPECTED
:
6317 l2cap_pass_to_tx(chan
, control
);
6319 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6320 BT_DBG("Busy, discarding expected seq %d",
6325 chan
->expected_tx_seq
= __next_seq(chan
,
6328 chan
->buffer_seq
= chan
->expected_tx_seq
;
6331 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6335 if (control
->final
) {
6336 if (!test_and_clear_bit(CONN_REJ_ACT
,
6337 &chan
->conn_state
)) {
6339 l2cap_retransmit_all(chan
, control
);
6340 l2cap_ertm_send(chan
);
6344 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6345 l2cap_send_ack(chan
);
6347 case L2CAP_TXSEQ_UNEXPECTED
:
6348 l2cap_pass_to_tx(chan
, control
);
6350 /* Can't issue SREJ frames in the local busy state.
6351 * Drop this frame, it will be seen as missing
6352 * when local busy is exited.
6354 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6355 BT_DBG("Busy, discarding unexpected seq %d",
6360 /* There was a gap in the sequence, so an SREJ
6361 * must be sent for each missing frame. The
6362 * current frame is stored for later use.
6364 skb_queue_tail(&chan
->srej_q
, skb
);
6366 BT_DBG("Queued %p (queue len %d)", skb
,
6367 skb_queue_len(&chan
->srej_q
));
6369 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6370 l2cap_seq_list_clear(&chan
->srej_list
);
6371 l2cap_send_srej(chan
, control
->txseq
);
6373 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6375 case L2CAP_TXSEQ_DUPLICATE
:
6376 l2cap_pass_to_tx(chan
, control
);
6378 case L2CAP_TXSEQ_INVALID_IGNORE
:
6380 case L2CAP_TXSEQ_INVALID
:
6382 l2cap_send_disconn_req(chan
, ECONNRESET
);
6386 case L2CAP_EV_RECV_RR
:
6387 l2cap_pass_to_tx(chan
, control
);
6388 if (control
->final
) {
6389 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6391 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6392 !__chan_is_moving(chan
)) {
6394 l2cap_retransmit_all(chan
, control
);
6397 l2cap_ertm_send(chan
);
6398 } else if (control
->poll
) {
6399 l2cap_send_i_or_rr_or_rnr(chan
);
6401 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6402 &chan
->conn_state
) &&
6403 chan
->unacked_frames
)
6404 __set_retrans_timer(chan
);
6406 l2cap_ertm_send(chan
);
6409 case L2CAP_EV_RECV_RNR
:
6410 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6411 l2cap_pass_to_tx(chan
, control
);
6412 if (control
&& control
->poll
) {
6413 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6414 l2cap_send_rr_or_rnr(chan
, 0);
6416 __clear_retrans_timer(chan
);
6417 l2cap_seq_list_clear(&chan
->retrans_list
);
6419 case L2CAP_EV_RECV_REJ
:
6420 l2cap_handle_rej(chan
, control
);
6422 case L2CAP_EV_RECV_SREJ
:
6423 l2cap_handle_srej(chan
, control
);
6429 if (skb
&& !skb_in_use
) {
6430 BT_DBG("Freeing %p", skb
);
6437 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6438 struct l2cap_ctrl
*control
,
6439 struct sk_buff
*skb
, u8 event
)
6442 u16 txseq
= control
->txseq
;
6443 bool skb_in_use
= false;
6445 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6449 case L2CAP_EV_RECV_IFRAME
:
6450 switch (l2cap_classify_txseq(chan
, txseq
)) {
6451 case L2CAP_TXSEQ_EXPECTED
:
6452 /* Keep frame for reassembly later */
6453 l2cap_pass_to_tx(chan
, control
);
6454 skb_queue_tail(&chan
->srej_q
, skb
);
6456 BT_DBG("Queued %p (queue len %d)", skb
,
6457 skb_queue_len(&chan
->srej_q
));
6459 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6461 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6462 l2cap_seq_list_pop(&chan
->srej_list
);
6464 l2cap_pass_to_tx(chan
, control
);
6465 skb_queue_tail(&chan
->srej_q
, skb
);
6467 BT_DBG("Queued %p (queue len %d)", skb
,
6468 skb_queue_len(&chan
->srej_q
));
6470 err
= l2cap_rx_queued_iframes(chan
);
6475 case L2CAP_TXSEQ_UNEXPECTED
:
6476 /* Got a frame that can't be reassembled yet.
6477 * Save it for later, and send SREJs to cover
6478 * the missing frames.
6480 skb_queue_tail(&chan
->srej_q
, skb
);
6482 BT_DBG("Queued %p (queue len %d)", skb
,
6483 skb_queue_len(&chan
->srej_q
));
6485 l2cap_pass_to_tx(chan
, control
);
6486 l2cap_send_srej(chan
, control
->txseq
);
6488 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6489 /* This frame was requested with an SREJ, but
6490 * some expected retransmitted frames are
6491 * missing. Request retransmission of missing
6494 skb_queue_tail(&chan
->srej_q
, skb
);
6496 BT_DBG("Queued %p (queue len %d)", skb
,
6497 skb_queue_len(&chan
->srej_q
));
6499 l2cap_pass_to_tx(chan
, control
);
6500 l2cap_send_srej_list(chan
, control
->txseq
);
6502 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6503 /* We've already queued this frame. Drop this copy. */
6504 l2cap_pass_to_tx(chan
, control
);
6506 case L2CAP_TXSEQ_DUPLICATE
:
6507 /* Expecting a later sequence number, so this frame
6508 * was already received. Ignore it completely.
6511 case L2CAP_TXSEQ_INVALID_IGNORE
:
6513 case L2CAP_TXSEQ_INVALID
:
6515 l2cap_send_disconn_req(chan
, ECONNRESET
);
6519 case L2CAP_EV_RECV_RR
:
6520 l2cap_pass_to_tx(chan
, control
);
6521 if (control
->final
) {
6522 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6524 if (!test_and_clear_bit(CONN_REJ_ACT
,
6525 &chan
->conn_state
)) {
6527 l2cap_retransmit_all(chan
, control
);
6530 l2cap_ertm_send(chan
);
6531 } else if (control
->poll
) {
6532 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6533 &chan
->conn_state
) &&
6534 chan
->unacked_frames
) {
6535 __set_retrans_timer(chan
);
6538 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6539 l2cap_send_srej_tail(chan
);
6541 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6542 &chan
->conn_state
) &&
6543 chan
->unacked_frames
)
6544 __set_retrans_timer(chan
);
6546 l2cap_send_ack(chan
);
6549 case L2CAP_EV_RECV_RNR
:
6550 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6551 l2cap_pass_to_tx(chan
, control
);
6552 if (control
->poll
) {
6553 l2cap_send_srej_tail(chan
);
6555 struct l2cap_ctrl rr_control
;
6556 memset(&rr_control
, 0, sizeof(rr_control
));
6557 rr_control
.sframe
= 1;
6558 rr_control
.super
= L2CAP_SUPER_RR
;
6559 rr_control
.reqseq
= chan
->buffer_seq
;
6560 l2cap_send_sframe(chan
, &rr_control
);
6564 case L2CAP_EV_RECV_REJ
:
6565 l2cap_handle_rej(chan
, control
);
6567 case L2CAP_EV_RECV_SREJ
:
6568 l2cap_handle_srej(chan
, control
);
6572 if (skb
&& !skb_in_use
) {
6573 BT_DBG("Freeing %p", skb
);
6580 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6582 BT_DBG("chan %p", chan
);
6584 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6587 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6589 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6591 return l2cap_resegment(chan
);
6594 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6595 struct l2cap_ctrl
*control
,
6596 struct sk_buff
*skb
, u8 event
)
6600 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6606 l2cap_process_reqseq(chan
, control
->reqseq
);
6608 if (!skb_queue_empty(&chan
->tx_q
))
6609 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6611 chan
->tx_send_head
= NULL
;
6613 /* Rewind next_tx_seq to the point expected
6616 chan
->next_tx_seq
= control
->reqseq
;
6617 chan
->unacked_frames
= 0;
6619 err
= l2cap_finish_move(chan
);
6623 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6624 l2cap_send_i_or_rr_or_rnr(chan
);
6626 if (event
== L2CAP_EV_RECV_IFRAME
)
6629 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6632 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6633 struct l2cap_ctrl
*control
,
6634 struct sk_buff
*skb
, u8 event
)
6638 if (!control
->final
)
6641 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6643 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6644 l2cap_process_reqseq(chan
, control
->reqseq
);
6646 if (!skb_queue_empty(&chan
->tx_q
))
6647 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6649 chan
->tx_send_head
= NULL
;
6651 /* Rewind next_tx_seq to the point expected
6654 chan
->next_tx_seq
= control
->reqseq
;
6655 chan
->unacked_frames
= 0;
6658 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6660 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6662 err
= l2cap_resegment(chan
);
6665 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}
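
/* Example (illustrative): with next_tx_seq == 10 and expected_ack_seq
 * == 6 there are 4 unacked frames, so a reqseq of 6..10 passes the
 * check and anything else is treated as an invalid acknowledgment.
 * __seq_offset() works modulo the sequence space, so the same
 * comparison also holds across a sequence-number wrap.
 */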
6679 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6680 struct sk_buff
*skb
, u8 event
)
6684 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6685 control
, skb
, event
, chan
->rx_state
);
6687 if (__valid_reqseq(chan
, control
->reqseq
)) {
6688 switch (chan
->rx_state
) {
6689 case L2CAP_RX_STATE_RECV
:
6690 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6692 case L2CAP_RX_STATE_SREJ_SENT
:
6693 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6696 case L2CAP_RX_STATE_WAIT_P
:
6697 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6699 case L2CAP_RX_STATE_WAIT_F
:
6700 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6707 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6708 control
->reqseq
, chan
->next_tx_seq
,
6709 chan
->expected_ack_seq
);
6710 l2cap_send_disconn_req(chan
, ECONNRESET
);
6716 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6717 struct sk_buff
*skb
)
6721 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6724 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6725 L2CAP_TXSEQ_EXPECTED
) {
6726 l2cap_pass_to_tx(chan
, control
);
6728 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6729 __next_seq(chan
, chan
->buffer_seq
));
6731 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6733 l2cap_reassemble_sdu(chan
, skb
, control
);
6736 kfree_skb(chan
->sdu
);
6739 chan
->sdu_last_frag
= NULL
;
6743 BT_DBG("Freeing %p", skb
);
6748 chan
->last_acked_seq
= control
->txseq
;
6749 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6754 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6756 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6760 __unpack_control(chan
, skb
);
6765 * We can just drop the corrupted I-frame here.
6766 * Receiver will miss it and start proper recovery
6767 * procedures and ask for retransmission.
6769 if (l2cap_check_fcs(chan
, skb
))
6772 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6773 len
-= L2CAP_SDULEN_SIZE
;
6775 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6776 len
-= L2CAP_FCS_SIZE
;
6778 if (len
> chan
->mps
) {
6779 l2cap_send_disconn_req(chan
, ECONNRESET
);
6783 if (!control
->sframe
) {
6786 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6787 control
->sar
, control
->reqseq
, control
->final
,
6790 /* Validate F-bit - F=0 always valid, F=1 only
6791 * valid in TX WAIT_F
6793 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6796 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6797 event
= L2CAP_EV_RECV_IFRAME
;
6798 err
= l2cap_rx(chan
, control
, skb
, event
);
6800 err
= l2cap_stream_rx(chan
, control
, skb
);
6804 l2cap_send_disconn_req(chan
, ECONNRESET
);
6806 const u8 rx_func_to_event
[4] = {
6807 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6808 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6811 /* Only I-frames are expected in streaming mode */
6812 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6815 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6816 control
->reqseq
, control
->final
, control
->poll
,
6820 BT_ERR("Trailing bytes: %d in sframe", len
);
6821 l2cap_send_disconn_req(chan
, ECONNRESET
);
6825 /* Validate F and P bits */
6826 if (control
->final
&& (control
->poll
||
6827 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6830 event
= rx_func_to_event
[control
->super
];
6831 if (l2cap_rx(chan
, control
, skb
, event
))
6832 l2cap_send_disconn_req(chan
, ECONNRESET
);
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
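
/* Example (assuming the default le_max_credits of 10): nothing is sent
 * while rx_credits is still 5 or more; once it drops to 4, a single
 * LE Flow Control Credit packet granting 10 - 4 = 6 credits goes out,
 * topping the pool back up to le_max_credits.
 */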
6868 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6872 if (!chan
->rx_credits
) {
6873 BT_ERR("No credits to receive LE L2CAP data");
6877 if (chan
->imtu
< skb
->len
) {
6878 BT_ERR("Too big LE L2CAP PDU");
6883 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6885 l2cap_chan_le_send_credits(chan
);
6892 sdu_len
= get_unaligned_le16(skb
->data
);
6893 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6895 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6896 sdu_len
, skb
->len
, chan
->imtu
);
6898 if (sdu_len
> chan
->imtu
) {
6899 BT_ERR("Too big LE L2CAP SDU length received");
6904 if (skb
->len
> sdu_len
) {
6905 BT_ERR("Too much LE L2CAP data received");
6910 if (skb
->len
== sdu_len
)
6911 return chan
->ops
->recv(chan
, skb
);
6914 chan
->sdu_len
= sdu_len
;
6915 chan
->sdu_last_frag
= skb
;
6920 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6921 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6923 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6924 BT_ERR("Too much LE L2CAP data received");
6929 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6932 if (chan
->sdu
->len
== chan
->sdu_len
) {
6933 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6936 chan
->sdu_last_frag
= NULL
;
6944 kfree_skb(chan
->sdu
);
6946 chan
->sdu_last_frag
= NULL
;
6950 /* We can't return an error here since we took care of the skb
6951 * freeing internally. An error return would cause the caller to
6952 * do a double-free of the skb.
6957 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6958 struct sk_buff
*skb
)
6960 struct l2cap_chan
*chan
;
6962 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6964 if (cid
== L2CAP_CID_A2MP
) {
6965 chan
= a2mp_channel_create(conn
, skb
);
6971 l2cap_chan_lock(chan
);
6973 BT_DBG("unknown cid 0x%4.4x", cid
);
6974 /* Drop packet and return */
6980 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6982 if (chan
->state
!= BT_CONNECTED
)
6985 switch (chan
->mode
) {
6986 case L2CAP_MODE_LE_FLOWCTL
:
6987 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6992 case L2CAP_MODE_BASIC
:
6993 /* If socket recv buffers overflows we drop data here
6994 * which is *bad* because L2CAP has to be reliable.
6995 * But we don't have any other choice. L2CAP doesn't
6996 * provide flow control mechanism. */
6998 if (chan
->imtu
< skb
->len
)
7001 if (!chan
->ops
->recv(chan
, skb
))
7005 case L2CAP_MODE_ERTM
:
7006 case L2CAP_MODE_STREAMING
:
7007 l2cap_data_rcv(chan
, skb
);
7011 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
7019 l2cap_chan_unlock(chan
);
7022 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
7023 struct sk_buff
*skb
)
7025 struct hci_conn
*hcon
= conn
->hcon
;
7026 struct l2cap_chan
*chan
;
7028 if (hcon
->type
!= ACL_LINK
)
7031 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
7036 BT_DBG("chan %p, len %d", chan
, skb
->len
);
7038 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
7041 if (chan
->imtu
< skb
->len
)
7044 /* Store remote BD_ADDR and PSM for msg_name */
7045 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
7046 bt_cb(skb
)->psm
= psm
;
7048 if (!chan
->ops
->recv(chan
, skb
))
7055 static void l2cap_att_channel(struct l2cap_conn
*conn
,
7056 struct sk_buff
*skb
)
7058 struct hci_conn
*hcon
= conn
->hcon
;
7059 struct l2cap_chan
*chan
;
7061 if (hcon
->type
!= LE_LINK
)
7064 chan
= l2cap_global_chan_by_scid(BT_CONNECTED
, L2CAP_CID_ATT
,
7065 &hcon
->src
, &hcon
->dst
);
7069 BT_DBG("chan %p, len %d", chan
, skb
->len
);
7071 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
))
7074 if (chan
->imtu
< skb
->len
)
7077 if (!chan
->ops
->recv(chan
, skb
))
7084 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
7086 struct l2cap_hdr
*lh
= (void *) skb
->data
;
7090 skb_pull(skb
, L2CAP_HDR_SIZE
);
7091 cid
= __le16_to_cpu(lh
->cid
);
7092 len
= __le16_to_cpu(lh
->len
);
7094 if (len
!= skb
->len
) {
7099 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
7102 case L2CAP_CID_SIGNALING
:
7103 l2cap_sig_channel(conn
, skb
);
7106 case L2CAP_CID_CONN_LESS
:
7107 psm
= get_unaligned((__le16
*) skb
->data
);
7108 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
7109 l2cap_conless_channel(conn
, psm
, skb
);
7113 l2cap_att_channel(conn
, skb
);
7116 case L2CAP_CID_LE_SIGNALING
:
7117 l2cap_le_sig_channel(conn
, skb
);
7121 if (smp_sig_channel(conn
, skb
))
7122 l2cap_conn_del(conn
->hcon
, EACCES
);
7125 case L2CAP_FC_6LOWPAN
:
7126 bt_6lowpan_recv(conn
, skb
);
7130 l2cap_data_channel(conn
, cid
, skb
);
7135 /* ---- L2CAP interface with lower layer (HCI) ---- */
7137 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7139 int exact
= 0, lm1
= 0, lm2
= 0;
7140 struct l2cap_chan
*c
;
7142 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7144 /* Find listening sockets and check their link_mode */
7145 read_lock(&chan_list_lock
);
7146 list_for_each_entry(c
, &chan_list
, global_l
) {
7147 if (c
->state
!= BT_LISTEN
)
7150 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7151 lm1
|= HCI_LM_ACCEPT
;
7152 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7153 lm1
|= HCI_LM_MASTER
;
7155 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7156 lm2
|= HCI_LM_ACCEPT
;
7157 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7158 lm2
|= HCI_LM_MASTER
;
7161 read_unlock(&chan_list_lock
);
7163 return exact
? lm1
: lm2
;
7166 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7168 struct l2cap_conn
*conn
;
7170 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7173 conn
= l2cap_conn_add(hcon
);
7175 l2cap_conn_ready(conn
);
7177 l2cap_conn_del(hcon
, bt_to_errno(status
));
7181 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7183 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7185 BT_DBG("hcon %p", hcon
);
7188 return HCI_ERROR_REMOTE_USER_TERM
;
7189 return conn
->disc_reason
;
7192 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7194 BT_DBG("hcon %p reason %d", hcon
, reason
);
7196 bt_6lowpan_del_conn(hcon
->l2cap_data
);
7198 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7201 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7203 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7206 if (encrypt
== 0x00) {
7207 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7208 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7209 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
7210 l2cap_chan_close(chan
, ECONNREFUSED
);
7212 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7213 __clear_chan_timer(chan
);
7217 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7219 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7220 struct l2cap_chan
*chan
;
7225 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7227 if (hcon
->type
== LE_LINK
) {
7228 if (!status
&& encrypt
)
7229 smp_distribute_keys(conn
, 0);
7230 cancel_delayed_work(&conn
->security_timer
);
7233 mutex_lock(&conn
->chan_lock
);
7235 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7236 l2cap_chan_lock(chan
);
7238 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7239 state_to_string(chan
->state
));
7241 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
7242 l2cap_chan_unlock(chan
);
7246 if (chan
->scid
== L2CAP_CID_ATT
) {
7247 if (!status
&& encrypt
) {
7248 chan
->sec_level
= hcon
->sec_level
;
7249 l2cap_chan_ready(chan
);
7252 l2cap_chan_unlock(chan
);
7256 if (!__l2cap_no_conn_pending(chan
)) {
7257 l2cap_chan_unlock(chan
);
7261 if (!status
&& (chan
->state
== BT_CONNECTED
||
7262 chan
->state
== BT_CONFIG
)) {
7263 chan
->ops
->resume(chan
);
7264 l2cap_check_encryption(chan
, encrypt
);
7265 l2cap_chan_unlock(chan
);
7269 if (chan
->state
== BT_CONNECT
) {
7271 l2cap_start_connection(chan
);
7273 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7274 } else if (chan
->state
== BT_CONNECT2
) {
7275 struct l2cap_conn_rsp rsp
;
7279 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7280 res
= L2CAP_CR_PEND
;
7281 stat
= L2CAP_CS_AUTHOR_PEND
;
7282 chan
->ops
->defer(chan
);
7284 l2cap_state_change(chan
, BT_CONFIG
);
7285 res
= L2CAP_CR_SUCCESS
;
7286 stat
= L2CAP_CS_NO_INFO
;
7289 l2cap_state_change(chan
, BT_DISCONN
);
7290 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7291 res
= L2CAP_CR_SEC_BLOCK
;
7292 stat
= L2CAP_CS_NO_INFO
;
7295 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7296 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7297 rsp
.result
= cpu_to_le16(res
);
7298 rsp
.status
= cpu_to_le16(stat
);
7299 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7302 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7303 res
== L2CAP_CR_SUCCESS
) {
7305 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7306 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7308 l2cap_build_conf_req(chan
, buf
),
7310 chan
->num_conf_req
++;
7314 l2cap_chan_unlock(chan
);
7317 mutex_unlock(&conn
->chan_lock
);
7322 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7324 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7325 struct l2cap_hdr
*hdr
;
7328 /* For AMP controller do not create l2cap conn */
7329 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
7333 conn
= l2cap_conn_add(hcon
);
7338 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7342 case ACL_START_NO_FLUSH
:
7345 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7346 kfree_skb(conn
->rx_skb
);
7347 conn
->rx_skb
= NULL
;
7349 l2cap_conn_unreliable(conn
, ECOMM
);
7352 /* Start fragment always begin with Basic L2CAP header */
7353 if (skb
->len
< L2CAP_HDR_SIZE
) {
7354 BT_ERR("Frame is too short (len %d)", skb
->len
);
7355 l2cap_conn_unreliable(conn
, ECOMM
);
7359 hdr
= (struct l2cap_hdr
*) skb
->data
;
7360 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7362 if (len
== skb
->len
) {
7363 /* Complete frame received */
7364 l2cap_recv_frame(conn
, skb
);
7368 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7370 if (skb
->len
> len
) {
7371 BT_ERR("Frame is too long (len %d, expected len %d)",
7373 l2cap_conn_unreliable(conn
, ECOMM
);
7377 /* Allocate skb for the complete frame (with header) */
7378 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7382 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7384 conn
->rx_len
= len
- skb
->len
;
7388 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7390 if (!conn
->rx_len
) {
7391 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7392 l2cap_conn_unreliable(conn
, ECOMM
);
7396 if (skb
->len
> conn
->rx_len
) {
7397 BT_ERR("Fragment is too long (len %d, expected %d)",
7398 skb
->len
, conn
->rx_len
);
7399 kfree_skb(conn
->rx_skb
);
7400 conn
->rx_skb
= NULL
;
7402 l2cap_conn_unreliable(conn
, ECOMM
);
7406 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7408 conn
->rx_len
-= skb
->len
;
7410 if (!conn
->rx_len
) {
7411 /* Complete frame received. l2cap_recv_frame
7412 * takes ownership of the skb so set the global
7413 * rx_skb pointer to NULL first.
7415 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7416 conn
->rx_skb
= NULL
;
7417 l2cap_recv_frame(conn
, rx_skb
);
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, &c->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}

static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;

int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
			   &le_default_mps);

	bt_6lowpan_init();

	return 0;
}

void l2cap_exit(void)
{
	bt_6lowpan_cleanup();
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");