/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/amp.h>
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid = L2CAP_CID_DYN_START;

	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
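
/* A worked example (illustrative only, not from the original source):
 * with mask 0x0f, appending txseqs 3, 7 and then 2 to an empty list gives
 *   head = 3, tail = 2,
 *   list[3] = 7, list[7] = 2, list[2] = L2CAP_SEQ_LIST_TAIL,
 * so l2cap_seq_list_contains() is a single array lookup and
 * l2cap_seq_list_pop() just follows list[head].
 */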
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) into a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;

		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}

	return seq;
}
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
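
/* Work handler for the channel timer: derive an error reason from the
 * current channel state, close the channel while holding conn->chan_lock
 * and the channel lock, and drop the timer's reference on the channel.
 */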
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}

void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
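
/* Bind a channel to a connection and assign its CIDs according to the
 * channel type (connection-oriented, connectionless, A2MP or raw).
 * The caller is expected to hold conn->chan_lock; l2cap_chan_add() below
 * is the locked wrapper.
 */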
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			chan->omtu = L2CAP_DEFAULT_MTU;
			if (chan->dcid == L2CAP_CID_ATT)
				chan->scid = L2CAP_CID_ATT;
			else
				chan->scid = l2cap_alloc_cid(conn);
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
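
/* Build a signalling command with l2cap_build_cmd() and queue it on the
 * connection's HCI channel. Signalling traffic is sent non-flushable when
 * the controller supports it, and always at the highest ACL priority.
 */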
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
static bool __chan_is_moving(struct l2cap_chan *chan)
{
	return chan->move_state != L2CAP_MOVE_STABLE &&
	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}
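
/* Hand a single outgoing PDU to the HCI layer. If the channel has been
 * moved to a high-speed (AMP) link and is not currently mid-move, the PDU
 * goes out on hs_hchan instead of the BR/EDR HCI channel.
 */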
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
796 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
798 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
799 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
801 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
804 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
805 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
812 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
813 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
820 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
822 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
823 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
825 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
828 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
829 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
836 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
837 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
844 static inline void __unpack_control(struct l2cap_chan
*chan
,
847 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
848 __unpack_extended_control(get_unaligned_le32(skb
->data
),
849 &bt_cb(skb
)->control
);
850 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
852 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
853 &bt_cb(skb
)->control
);
854 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
858 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
862 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
863 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
865 if (control
->sframe
) {
866 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
867 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
868 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
870 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
871 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
877 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
881 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
882 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
884 if (control
->sframe
) {
885 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
886 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
887 packed
|= L2CAP_CTRL_FRAME_TYPE
;
889 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
890 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
896 static inline void __pack_control(struct l2cap_chan
*chan
,
897 struct l2cap_ctrl
*control
,
900 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
901 put_unaligned_le32(__pack_extended_control(control
),
902 skb
->data
+ L2CAP_HDR_SIZE
);
904 put_unaligned_le16(__pack_enhanced_control(control
),
905 skb
->data
+ L2CAP_HDR_SIZE
);
909 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
911 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
912 return L2CAP_EXT_HDR_SIZE
;
914 return L2CAP_ENH_HDR_SIZE
;
917 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
921 struct l2cap_hdr
*lh
;
922 int hlen
= __ertm_hdr_size(chan
);
924 if (chan
->fcs
== L2CAP_FCS_CRC16
)
925 hlen
+= L2CAP_FCS_SIZE
;
927 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
930 return ERR_PTR(-ENOMEM
);
932 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
933 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
934 lh
->cid
= cpu_to_le16(chan
->dcid
);
936 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
937 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
939 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
941 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
942 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
943 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
946 skb
->priority
= HCI_PRIO_MAX
;
950 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
951 struct l2cap_ctrl
*control
)
956 BT_DBG("chan %p, control %p", chan
, control
);
958 if (!control
->sframe
)
961 if (__chan_is_moving(chan
))
964 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
968 if (control
->super
== L2CAP_SUPER_RR
)
969 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
970 else if (control
->super
== L2CAP_SUPER_RNR
)
971 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
973 if (control
->super
!= L2CAP_SUPER_SREJ
) {
974 chan
->last_acked_seq
= control
->reqseq
;
975 __clear_ack_timer(chan
);
978 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
979 control
->final
, control
->poll
, control
->super
);
981 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
982 control_field
= __pack_extended_control(control
);
984 control_field
= __pack_enhanced_control(control
);
986 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
988 l2cap_do_send(chan
, skb
);
991 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
993 struct l2cap_ctrl control
;
995 BT_DBG("chan %p, poll %d", chan
, poll
);
997 memset(&control
, 0, sizeof(control
));
1001 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1002 control
.super
= L2CAP_SUPER_RNR
;
1004 control
.super
= L2CAP_SUPER_RR
;
1006 control
.reqseq
= chan
->buffer_seq
;
1007 l2cap_send_sframe(chan
, &control
);
1010 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1012 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1015 static bool __amp_capable(struct l2cap_chan
*chan
)
1017 struct l2cap_conn
*conn
= chan
->conn
;
1020 hci_amp_capable() &&
1021 chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
&&
1022 conn
->fixed_chan_mask
& L2CAP_FC_A2MP
)
1028 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1030 /* Check EFS parameters */
1034 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1036 struct l2cap_conn
*conn
= chan
->conn
;
1037 struct l2cap_conn_req req
;
1039 req
.scid
= cpu_to_le16(chan
->scid
);
1040 req
.psm
= chan
->psm
;
1042 chan
->ident
= l2cap_get_ident(conn
);
1044 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1046 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1049 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1051 struct l2cap_create_chan_req req
;
1052 req
.scid
= cpu_to_le16(chan
->scid
);
1053 req
.psm
= chan
->psm
;
1054 req
.amp_id
= amp_id
;
1056 chan
->ident
= l2cap_get_ident(chan
->conn
);
1058 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1062 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1064 struct sk_buff
*skb
;
1066 BT_DBG("chan %p", chan
);
1068 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1071 __clear_retrans_timer(chan
);
1072 __clear_monitor_timer(chan
);
1073 __clear_ack_timer(chan
);
1075 chan
->retry_count
= 0;
1076 skb_queue_walk(&chan
->tx_q
, skb
) {
1077 if (bt_cb(skb
)->control
.retries
)
1078 bt_cb(skb
)->control
.retries
= 1;
1083 chan
->expected_tx_seq
= chan
->buffer_seq
;
1085 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1086 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1087 l2cap_seq_list_clear(&chan
->retrans_list
);
1088 l2cap_seq_list_clear(&chan
->srej_list
);
1089 skb_queue_purge(&chan
->srej_q
);
1091 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1092 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1094 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1097 static void l2cap_move_done(struct l2cap_chan
*chan
)
1099 u8 move_role
= chan
->move_role
;
1100 BT_DBG("chan %p", chan
);
1102 chan
->move_state
= L2CAP_MOVE_STABLE
;
1103 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1105 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1108 switch (move_role
) {
1109 case L2CAP_MOVE_ROLE_INITIATOR
:
1110 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1111 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1113 case L2CAP_MOVE_ROLE_RESPONDER
:
1114 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1119 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1121 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1122 chan
->conf_state
= 0;
1123 __clear_chan_timer(chan
);
1125 chan
->state
= BT_CONNECTED
;
1127 chan
->ops
->ready(chan
);
1130 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1132 if (__amp_capable(chan
)) {
1133 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1134 a2mp_discover_amp(chan
);
1136 l2cap_send_conn_req(chan
);
1140 static void l2cap_do_start(struct l2cap_chan
*chan
)
1142 struct l2cap_conn
*conn
= chan
->conn
;
1144 if (conn
->hcon
->type
== LE_LINK
) {
1145 l2cap_chan_ready(chan
);
1149 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1150 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1153 if (l2cap_chan_check_security(chan
) &&
1154 __l2cap_no_conn_pending(chan
)) {
1155 l2cap_start_connection(chan
);
1158 struct l2cap_info_req req
;
1159 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1161 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1162 conn
->info_ident
= l2cap_get_ident(conn
);
1164 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1166 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1171 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1173 u32 local_feat_mask
= l2cap_feat_mask
;
1175 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1178 case L2CAP_MODE_ERTM
:
1179 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1180 case L2CAP_MODE_STREAMING
:
1181 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1187 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1189 struct sock
*sk
= chan
->sk
;
1190 struct l2cap_conn
*conn
= chan
->conn
;
1191 struct l2cap_disconn_req req
;
1196 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1197 __clear_retrans_timer(chan
);
1198 __clear_monitor_timer(chan
);
1199 __clear_ack_timer(chan
);
1202 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1203 l2cap_state_change(chan
, BT_DISCONN
);
1207 req
.dcid
= cpu_to_le16(chan
->dcid
);
1208 req
.scid
= cpu_to_le16(chan
->scid
);
1209 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1213 __l2cap_state_change(chan
, BT_DISCONN
);
1214 __l2cap_chan_set_err(chan
, err
);
1218 /* ---- L2CAP connections ---- */
1219 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1221 struct l2cap_chan
*chan
, *tmp
;
1223 BT_DBG("conn %p", conn
);
1225 mutex_lock(&conn
->chan_lock
);
1227 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1228 struct sock
*sk
= chan
->sk
;
1230 l2cap_chan_lock(chan
);
1232 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1233 l2cap_chan_unlock(chan
);
1237 if (chan
->state
== BT_CONNECT
) {
1238 if (!l2cap_chan_check_security(chan
) ||
1239 !__l2cap_no_conn_pending(chan
)) {
1240 l2cap_chan_unlock(chan
);
1244 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1245 && test_bit(CONF_STATE2_DEVICE
,
1246 &chan
->conf_state
)) {
1247 l2cap_chan_close(chan
, ECONNRESET
);
1248 l2cap_chan_unlock(chan
);
1252 l2cap_start_connection(chan
);
1254 } else if (chan
->state
== BT_CONNECT2
) {
1255 struct l2cap_conn_rsp rsp
;
1257 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1258 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1260 if (l2cap_chan_check_security(chan
)) {
1262 if (test_bit(BT_SK_DEFER_SETUP
,
1263 &bt_sk(sk
)->flags
)) {
1264 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1265 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1266 chan
->ops
->defer(chan
);
1269 __l2cap_state_change(chan
, BT_CONFIG
);
1270 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1271 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1275 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1276 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1279 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1282 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1283 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1284 l2cap_chan_unlock(chan
);
1288 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1289 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1290 l2cap_build_conf_req(chan
, buf
), buf
);
1291 chan
->num_conf_req
++;
1294 l2cap_chan_unlock(chan
);
1297 mutex_unlock(&conn
->chan_lock
);
1300 /* Find socket with cid and source/destination bdaddr.
1301 * Returns closest match, locked.
1303 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1307 struct l2cap_chan
*c
, *c1
= NULL
;
1309 read_lock(&chan_list_lock
);
1311 list_for_each_entry(c
, &chan_list
, global_l
) {
1312 struct sock
*sk
= c
->sk
;
1314 if (state
&& c
->state
!= state
)
1317 if (c
->scid
== cid
) {
1318 int src_match
, dst_match
;
1319 int src_any
, dst_any
;
1322 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1323 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1324 if (src_match
&& dst_match
) {
1325 read_unlock(&chan_list_lock
);
1330 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1331 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1332 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1333 (src_any
&& dst_any
))
1338 read_unlock(&chan_list_lock
);
1343 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1345 struct sock
*parent
;
1346 struct l2cap_chan
*chan
, *pchan
;
1350 /* Check if we have socket listening on cid */
1351 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_ATT
,
1352 conn
->src
, conn
->dst
);
1356 /* Client ATT sockets should override the server one */
1357 if (__l2cap_get_chan_by_dcid(conn
, L2CAP_CID_ATT
))
1364 chan
= pchan
->ops
->new_connection(pchan
);
1368 chan
->dcid
= L2CAP_CID_ATT
;
1370 bacpy(&bt_sk(chan
->sk
)->src
, conn
->src
);
1371 bacpy(&bt_sk(chan
->sk
)->dst
, conn
->dst
);
1373 __l2cap_chan_add(conn
, chan
);
1376 release_sock(parent
);
1379 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1381 struct l2cap_chan
*chan
;
1382 struct hci_conn
*hcon
= conn
->hcon
;
1384 BT_DBG("conn %p", conn
);
1386 /* For outgoing pairing which doesn't necessarily have an
1387 * associated socket (e.g. mgmt_pair_device).
1389 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1390 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1392 mutex_lock(&conn
->chan_lock
);
1394 if (hcon
->type
== LE_LINK
)
1395 l2cap_le_conn_ready(conn
);
1397 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1399 l2cap_chan_lock(chan
);
1401 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1402 l2cap_chan_unlock(chan
);
1406 if (hcon
->type
== LE_LINK
) {
1407 if (smp_conn_security(hcon
, chan
->sec_level
))
1408 l2cap_chan_ready(chan
);
1410 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1411 struct sock
*sk
= chan
->sk
;
1412 __clear_chan_timer(chan
);
1414 __l2cap_state_change(chan
, BT_CONNECTED
);
1415 sk
->sk_state_change(sk
);
1418 } else if (chan
->state
== BT_CONNECT
)
1419 l2cap_do_start(chan
);
1421 l2cap_chan_unlock(chan
);
1424 mutex_unlock(&conn
->chan_lock
);
/* Notify sockets that we cannot guarantee reliability anymore */
1428 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1430 struct l2cap_chan
*chan
;
1432 BT_DBG("conn %p", conn
);
1434 mutex_lock(&conn
->chan_lock
);
1436 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1437 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1438 l2cap_chan_set_err(chan
, err
);
1441 mutex_unlock(&conn
->chan_lock
);
1444 static void l2cap_info_timeout(struct work_struct
*work
)
1446 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1449 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1450 conn
->info_ident
= 0;
1452 l2cap_conn_start(conn
);
/* External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered, or it is
 * unregistered implicitly when the underlying l2cap_conn object is deleted.
 * This guarantees that l2cap->hcon, l2cap->hchan, .. are valid as long as the
 * remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they
 * intend to call l2cap_unregister_user(); otherwise, the l2cap_conn object
 * might get destroyed at any time.
 */
1468 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1470 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here.
	 */
1482 if (user
->list
.next
|| user
->list
.prev
) {
1487 /* conn->hchan is NULL after l2cap_conn_del() was called */
1493 ret
= user
->probe(conn
, user
);
1497 list_add(&user
->list
, &conn
->users
);
1501 hci_dev_unlock(hdev
);
1504 EXPORT_SYMBOL(l2cap_register_user
);
1506 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1508 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1512 if (!user
->list
.next
|| !user
->list
.prev
)
1515 list_del(&user
->list
);
1516 user
->list
.next
= NULL
;
1517 user
->list
.prev
= NULL
;
1518 user
->remove(conn
, user
);
1521 hci_dev_unlock(hdev
);
1523 EXPORT_SYMBOL(l2cap_unregister_user
);
1525 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1527 struct l2cap_user
*user
;
1529 while (!list_empty(&conn
->users
)) {
1530 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1531 list_del(&user
->list
);
1532 user
->list
.next
= NULL
;
1533 user
->list
.prev
= NULL
;
1534 user
->remove(conn
, user
);
1538 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1540 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1541 struct l2cap_chan
*chan
, *l
;
1546 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1548 kfree_skb(conn
->rx_skb
);
1550 l2cap_unregister_all_users(conn
);
1552 mutex_lock(&conn
->chan_lock
);
1555 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1556 l2cap_chan_hold(chan
);
1557 l2cap_chan_lock(chan
);
1559 l2cap_chan_del(chan
, err
);
1561 l2cap_chan_unlock(chan
);
1563 chan
->ops
->close(chan
);
1564 l2cap_chan_put(chan
);
1567 mutex_unlock(&conn
->chan_lock
);
1569 hci_chan_del(conn
->hchan
);
1571 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1572 cancel_delayed_work_sync(&conn
->info_timer
);
1574 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1575 cancel_delayed_work_sync(&conn
->security_timer
);
1576 smp_chan_destroy(conn
);
1579 hcon
->l2cap_data
= NULL
;
1581 l2cap_conn_put(conn
);
1584 static void security_timeout(struct work_struct
*work
)
1586 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1587 security_timer
.work
);
1589 BT_DBG("conn %p", conn
);
1591 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1592 smp_chan_destroy(conn
);
1593 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1597 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
1599 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1600 struct hci_chan
*hchan
;
1605 hchan
= hci_chan_create(hcon
);
1609 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1611 hci_chan_del(hchan
);
1615 kref_init(&conn
->ref
);
1616 hcon
->l2cap_data
= conn
;
1618 hci_conn_get(conn
->hcon
);
1619 conn
->hchan
= hchan
;
1621 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1623 switch (hcon
->type
) {
1625 if (hcon
->hdev
->le_mtu
) {
1626 conn
->mtu
= hcon
->hdev
->le_mtu
;
1631 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1635 conn
->src
= &hcon
->hdev
->bdaddr
;
1636 conn
->dst
= &hcon
->dst
;
1638 conn
->feat_mask
= 0;
1640 spin_lock_init(&conn
->lock
);
1641 mutex_init(&conn
->chan_lock
);
1643 INIT_LIST_HEAD(&conn
->chan_l
);
1644 INIT_LIST_HEAD(&conn
->users
);
1646 if (hcon
->type
== LE_LINK
)
1647 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1649 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1651 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1656 static void l2cap_conn_free(struct kref
*ref
)
1658 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1660 hci_conn_put(conn
->hcon
);
1664 void l2cap_conn_get(struct l2cap_conn
*conn
)
1666 kref_get(&conn
->ref
);
1668 EXPORT_SYMBOL(l2cap_conn_get
);
1670 void l2cap_conn_put(struct l2cap_conn
*conn
)
1672 kref_put(&conn
->ref
, l2cap_conn_free
);
1674 EXPORT_SYMBOL(l2cap_conn_put
);
1676 /* ---- Socket interface ---- */
1678 /* Find socket with psm and source / destination bdaddr.
1679 * Returns closest match.
1681 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1685 struct l2cap_chan
*c
, *c1
= NULL
;
1687 read_lock(&chan_list_lock
);
1689 list_for_each_entry(c
, &chan_list
, global_l
) {
1690 struct sock
*sk
= c
->sk
;
1692 if (state
&& c
->state
!= state
)
1695 if (c
->psm
== psm
) {
1696 int src_match
, dst_match
;
1697 int src_any
, dst_any
;
1700 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1701 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1702 if (src_match
&& dst_match
) {
1703 read_unlock(&chan_list_lock
);
1708 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1709 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1710 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1711 (src_any
&& dst_any
))
1716 read_unlock(&chan_list_lock
);
1721 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1722 bdaddr_t
*dst
, u8 dst_type
)
1724 struct sock
*sk
= chan
->sk
;
1725 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1726 struct l2cap_conn
*conn
;
1727 struct hci_conn
*hcon
;
1728 struct hci_dev
*hdev
;
1732 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src
, dst
,
1733 dst_type
, __le16_to_cpu(psm
));
1735 hdev
= hci_get_route(dst
, src
);
1737 return -EHOSTUNREACH
;
1741 l2cap_chan_lock(chan
);
1743 /* PSM must be odd and lsb of upper byte must be 0 */
1744 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1745 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1750 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1755 switch (chan
->mode
) {
1756 case L2CAP_MODE_BASIC
:
1758 case L2CAP_MODE_ERTM
:
1759 case L2CAP_MODE_STREAMING
:
1768 switch (chan
->state
) {
1772 /* Already connecting */
1777 /* Already connected */
1791 /* Set destination address and psm */
1793 bacpy(&bt_sk(sk
)->dst
, dst
);
1799 auth_type
= l2cap_get_auth_type(chan
);
1801 if (bdaddr_type_is_le(dst_type
))
1802 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1803 chan
->sec_level
, auth_type
);
1805 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1806 chan
->sec_level
, auth_type
);
1809 err
= PTR_ERR(hcon
);
1813 conn
= l2cap_conn_add(hcon
);
1815 hci_conn_drop(hcon
);
1820 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
1821 hci_conn_drop(hcon
);
1826 /* Update source addr of the socket */
1827 bacpy(src
, conn
->src
);
1829 l2cap_chan_unlock(chan
);
1830 l2cap_chan_add(conn
, chan
);
1831 l2cap_chan_lock(chan
);
1833 /* l2cap_chan_add takes its own ref so we can drop this one */
1834 hci_conn_drop(hcon
);
1836 l2cap_state_change(chan
, BT_CONNECT
);
1837 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1839 if (hcon
->state
== BT_CONNECTED
) {
1840 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1841 __clear_chan_timer(chan
);
1842 if (l2cap_chan_check_security(chan
))
1843 l2cap_state_change(chan
, BT_CONNECTED
);
1845 l2cap_do_start(chan
);
1851 l2cap_chan_unlock(chan
);
1852 hci_dev_unlock(hdev
);
1857 int __l2cap_wait_ack(struct sock
*sk
)
1859 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1860 DECLARE_WAITQUEUE(wait
, current
);
1864 add_wait_queue(sk_sleep(sk
), &wait
);
1865 set_current_state(TASK_INTERRUPTIBLE
);
1866 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1870 if (signal_pending(current
)) {
1871 err
= sock_intr_errno(timeo
);
1876 timeo
= schedule_timeout(timeo
);
1878 set_current_state(TASK_INTERRUPTIBLE
);
1880 err
= sock_error(sk
);
1884 set_current_state(TASK_RUNNING
);
1885 remove_wait_queue(sk_sleep(sk
), &wait
);
1889 static void l2cap_monitor_timeout(struct work_struct
*work
)
1891 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1892 monitor_timer
.work
);
1894 BT_DBG("chan %p", chan
);
1896 l2cap_chan_lock(chan
);
1899 l2cap_chan_unlock(chan
);
1900 l2cap_chan_put(chan
);
1904 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1906 l2cap_chan_unlock(chan
);
1907 l2cap_chan_put(chan
);
1910 static void l2cap_retrans_timeout(struct work_struct
*work
)
1912 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1913 retrans_timer
.work
);
1915 BT_DBG("chan %p", chan
);
1917 l2cap_chan_lock(chan
);
1920 l2cap_chan_unlock(chan
);
1921 l2cap_chan_put(chan
);
1925 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1926 l2cap_chan_unlock(chan
);
1927 l2cap_chan_put(chan
);
1930 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1931 struct sk_buff_head
*skbs
)
1933 struct sk_buff
*skb
;
1934 struct l2cap_ctrl
*control
;
1936 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1938 if (__chan_is_moving(chan
))
1941 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1943 while (!skb_queue_empty(&chan
->tx_q
)) {
1945 skb
= skb_dequeue(&chan
->tx_q
);
1947 bt_cb(skb
)->control
.retries
= 1;
1948 control
= &bt_cb(skb
)->control
;
1950 control
->reqseq
= 0;
1951 control
->txseq
= chan
->next_tx_seq
;
1953 __pack_control(chan
, control
, skb
);
1955 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1956 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1957 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1960 l2cap_do_send(chan
, skb
);
1962 BT_DBG("Sent txseq %u", control
->txseq
);
1964 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1965 chan
->frames_sent
++;
1969 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1971 struct sk_buff
*skb
, *tx_skb
;
1972 struct l2cap_ctrl
*control
;
1975 BT_DBG("chan %p", chan
);
1977 if (chan
->state
!= BT_CONNECTED
)
1980 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1983 if (__chan_is_moving(chan
))
1986 while (chan
->tx_send_head
&&
1987 chan
->unacked_frames
< chan
->remote_tx_win
&&
1988 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1990 skb
= chan
->tx_send_head
;
1992 bt_cb(skb
)->control
.retries
= 1;
1993 control
= &bt_cb(skb
)->control
;
1995 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1998 control
->reqseq
= chan
->buffer_seq
;
1999 chan
->last_acked_seq
= chan
->buffer_seq
;
2000 control
->txseq
= chan
->next_tx_seq
;
2002 __pack_control(chan
, control
, skb
);
2004 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2005 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
2006 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
2009 /* Clone after data has been modified. Data is assumed to be
2010 read-only (for locking purposes) on cloned sk_buffs.
2012 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2017 __set_retrans_timer(chan
);
2019 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2020 chan
->unacked_frames
++;
2021 chan
->frames_sent
++;
2024 if (skb_queue_is_last(&chan
->tx_q
, skb
))
2025 chan
->tx_send_head
= NULL
;
2027 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
2029 l2cap_do_send(chan
, tx_skb
);
2030 BT_DBG("Sent txseq %u", control
->txseq
);
2033 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
2034 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
2039 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
2041 struct l2cap_ctrl control
;
2042 struct sk_buff
*skb
;
2043 struct sk_buff
*tx_skb
;
2046 BT_DBG("chan %p", chan
);
2048 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2051 if (__chan_is_moving(chan
))
2054 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
2055 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
2057 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
2059 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2064 bt_cb(skb
)->control
.retries
++;
2065 control
= bt_cb(skb
)->control
;
2067 if (chan
->max_tx
!= 0 &&
2068 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
2069 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
2070 l2cap_send_disconn_req(chan
, ECONNRESET
);
2071 l2cap_seq_list_clear(&chan
->retrans_list
);
2075 control
.reqseq
= chan
->buffer_seq
;
2076 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2081 if (skb_cloned(skb
)) {
2082 /* Cloned sk_buffs are read-only, so we need a
2085 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
2087 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2091 l2cap_seq_list_clear(&chan
->retrans_list
);
2095 /* Update skb contents */
2096 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2097 put_unaligned_le32(__pack_extended_control(&control
),
2098 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2100 put_unaligned_le16(__pack_enhanced_control(&control
),
2101 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2104 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2105 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
2106 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2110 l2cap_do_send(chan
, tx_skb
);
2112 BT_DBG("Resent txseq %d", control
.txseq
);
2114 chan
->last_acked_seq
= chan
->buffer_seq
;
2118 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2119 struct l2cap_ctrl
*control
)
2121 BT_DBG("chan %p, control %p", chan
, control
);
2123 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2124 l2cap_ertm_resend(chan
);
2127 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2128 struct l2cap_ctrl
*control
)
2130 struct sk_buff
*skb
;
2132 BT_DBG("chan %p, control %p", chan
, control
);
2135 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2137 l2cap_seq_list_clear(&chan
->retrans_list
);
2139 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2142 if (chan
->unacked_frames
) {
2143 skb_queue_walk(&chan
->tx_q
, skb
) {
2144 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2145 skb
== chan
->tx_send_head
)
2149 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2150 if (skb
== chan
->tx_send_head
)
2153 l2cap_seq_list_append(&chan
->retrans_list
,
2154 bt_cb(skb
)->control
.txseq
);
2157 l2cap_ertm_resend(chan
);
2161 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2163 struct l2cap_ctrl control
;
2164 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2165 chan
->last_acked_seq
);
2168 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2169 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2171 memset(&control
, 0, sizeof(control
));
2174 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2175 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2176 __clear_ack_timer(chan
);
2177 control
.super
= L2CAP_SUPER_RNR
;
2178 control
.reqseq
= chan
->buffer_seq
;
2179 l2cap_send_sframe(chan
, &control
);
2181 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2182 l2cap_ertm_send(chan
);
2183 /* If any i-frames were sent, they included an ack */
2184 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2188 /* Ack now if the window is 3/4ths full.
2189 * Calculate without mul or div
2191 threshold
= chan
->ack_win
;
2192 threshold
+= threshold
<< 1;
2195 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2198 if (frames_to_ack
>= threshold
) {
2199 __clear_ack_timer(chan
);
2200 control
.super
= L2CAP_SUPER_RR
;
2201 control
.reqseq
= chan
->buffer_seq
;
2202 l2cap_send_sframe(chan
, &control
);
2207 __set_ack_timer(chan
);
2211 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2212 struct msghdr
*msg
, int len
,
2213 int count
, struct sk_buff
*skb
)
2215 struct l2cap_conn
*conn
= chan
->conn
;
2216 struct sk_buff
**frag
;
2219 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2225 /* Continuation fragments (no L2CAP header) */
2226 frag
= &skb_shinfo(skb
)->frag_list
;
2228 struct sk_buff
*tmp
;
2230 count
= min_t(unsigned int, conn
->mtu
, len
);
2232 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2233 msg
->msg_flags
& MSG_DONTWAIT
);
2235 return PTR_ERR(tmp
);
2239 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2242 (*frag
)->priority
= skb
->priority
;
2247 skb
->len
+= (*frag
)->len
;
2248 skb
->data_len
+= (*frag
)->len
;
2250 frag
= &(*frag
)->next
;
2256 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2257 struct msghdr
*msg
, size_t len
,
2260 struct l2cap_conn
*conn
= chan
->conn
;
2261 struct sk_buff
*skb
;
2262 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2263 struct l2cap_hdr
*lh
;
2265 BT_DBG("chan %p len %zu priority %u", chan
, len
, priority
);
2267 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2269 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2270 msg
->msg_flags
& MSG_DONTWAIT
);
2274 skb
->priority
= priority
;
2276 /* Create L2CAP header */
2277 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2278 lh
->cid
= cpu_to_le16(chan
->dcid
);
2279 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2280 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2282 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2283 if (unlikely(err
< 0)) {
2285 return ERR_PTR(err
);
2290 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2291 struct msghdr
*msg
, size_t len
,
2294 struct l2cap_conn
*conn
= chan
->conn
;
2295 struct sk_buff
*skb
;
2297 struct l2cap_hdr
*lh
;
2299 BT_DBG("chan %p len %zu", chan
, len
);
2301 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2303 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2304 msg
->msg_flags
& MSG_DONTWAIT
);
2308 skb
->priority
= priority
;
2310 /* Create L2CAP header */
2311 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2312 lh
->cid
= cpu_to_le16(chan
->dcid
);
2313 lh
->len
= cpu_to_le16(len
);
2315 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2316 if (unlikely(err
< 0)) {
2318 return ERR_PTR(err
);
2323 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2324 struct msghdr
*msg
, size_t len
,
2327 struct l2cap_conn
*conn
= chan
->conn
;
2328 struct sk_buff
*skb
;
2329 int err
, count
, hlen
;
2330 struct l2cap_hdr
*lh
;
2332 BT_DBG("chan %p len %zu", chan
, len
);
2335 return ERR_PTR(-ENOTCONN
);
2337 hlen
= __ertm_hdr_size(chan
);
2340 hlen
+= L2CAP_SDULEN_SIZE
;
2342 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2343 hlen
+= L2CAP_FCS_SIZE
;
2345 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2347 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2348 msg
->msg_flags
& MSG_DONTWAIT
);
2352 /* Create L2CAP header */
2353 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2354 lh
->cid
= cpu_to_le16(chan
->dcid
);
2355 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2357 /* Control header is populated later */
2358 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2359 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2361 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2364 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2366 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2367 if (unlikely(err
< 0)) {
2369 return ERR_PTR(err
);
2372 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2373 bt_cb(skb
)->control
.retries
= 0;
2377 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2378 struct sk_buff_head
*seg_queue
,
2379 struct msghdr
*msg
, size_t len
)
2381 struct sk_buff
*skb
;
2386 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2388 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2389 * so fragmented skbs are not used. The HCI layer's handling
2390 * of fragmented skbs is not compatible with ERTM's queueing.
2393 /* PDU size is derived from the HCI MTU */
2394 pdu_len
= chan
->conn
->mtu
;
2396 /* Constrain PDU size for BR/EDR connections */
2398 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2400 /* Adjust for largest possible L2CAP overhead. */
2402 pdu_len
-= L2CAP_FCS_SIZE
;
2404 pdu_len
-= __ertm_hdr_size(chan
);
2406 /* Remote device may have requested smaller PDUs */
2407 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2409 if (len
<= pdu_len
) {
2410 sar
= L2CAP_SAR_UNSEGMENTED
;
2414 sar
= L2CAP_SAR_START
;
2416 pdu_len
-= L2CAP_SDULEN_SIZE
;
2420 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2423 __skb_queue_purge(seg_queue
);
2424 return PTR_ERR(skb
);
2427 bt_cb(skb
)->control
.sar
= sar
;
2428 __skb_queue_tail(seg_queue
, skb
);
2433 pdu_len
+= L2CAP_SDULEN_SIZE
;
2436 if (len
<= pdu_len
) {
2437 sar
= L2CAP_SAR_END
;
2440 sar
= L2CAP_SAR_CONTINUE
;
2447 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2450 struct sk_buff
*skb
;
2452 struct sk_buff_head seg_queue
;
2454 /* Connectionless channel */
2455 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2456 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2458 return PTR_ERR(skb
);
2460 l2cap_do_send(chan
, skb
);
2464 switch (chan
->mode
) {
2465 case L2CAP_MODE_BASIC
:
2466 /* Check outgoing MTU */
2467 if (len
> chan
->omtu
)
2470 /* Create a basic PDU */
2471 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2473 return PTR_ERR(skb
);
2475 l2cap_do_send(chan
, skb
);
2479 case L2CAP_MODE_ERTM
:
2480 case L2CAP_MODE_STREAMING
:
2481 /* Check outgoing MTU */
2482 if (len
> chan
->omtu
) {
2487 __skb_queue_head_init(&seg_queue
);
2489 /* Do segmentation before calling in to the state machine,
2490 * since it's possible to block while waiting for memory
2493 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2495 /* The channel could have been closed while segmenting,
2496 * check that it is still connected.
2498 if (chan
->state
!= BT_CONNECTED
) {
2499 __skb_queue_purge(&seg_queue
);
2506 if (chan
->mode
== L2CAP_MODE_ERTM
)
2507 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2509 l2cap_streaming_send(chan
, &seg_queue
);
2513 /* If the skbs were not queued for sending, they'll still be in
2514 * seg_queue and need to be purged.
2516 __skb_queue_purge(&seg_queue
);
2520 BT_DBG("bad state %1.1x", chan
->mode
);
2527 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2529 struct l2cap_ctrl control
;
2532 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2534 memset(&control
, 0, sizeof(control
));
2536 control
.super
= L2CAP_SUPER_SREJ
;
2538 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2539 seq
= __next_seq(chan
, seq
)) {
2540 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2541 control
.reqseq
= seq
;
2542 l2cap_send_sframe(chan
, &control
);
2543 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2547 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2550 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2552 struct l2cap_ctrl control
;
2554 BT_DBG("chan %p", chan
);
2556 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2559 memset(&control
, 0, sizeof(control
));
2561 control
.super
= L2CAP_SUPER_SREJ
;
2562 control
.reqseq
= chan
->srej_list
.tail
;
2563 l2cap_send_sframe(chan
, &control
);
2566 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2568 struct l2cap_ctrl control
;
2572 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2574 memset(&control
, 0, sizeof(control
));
2576 control
.super
= L2CAP_SUPER_SREJ
;
2578 /* Capture initial list head to allow only one pass through the list. */
2579 initial_head
= chan
->srej_list
.head
;
2582 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2583 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2586 control
.reqseq
= seq
;
2587 l2cap_send_sframe(chan
, &control
);
2588 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2589 } while (chan
->srej_list
.head
!= initial_head
);
2592 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2594 struct sk_buff
*acked_skb
;
2597 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2599 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2602 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2603 chan
->expected_ack_seq
, chan
->unacked_frames
);
2605 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2606 ackseq
= __next_seq(chan
, ackseq
)) {
2608 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2610 skb_unlink(acked_skb
, &chan
->tx_q
);
2611 kfree_skb(acked_skb
);
2612 chan
->unacked_frames
--;
2616 chan
->expected_ack_seq
= reqseq
;
2618 if (chan
->unacked_frames
== 0)
2619 __clear_retrans_timer(chan
);
2621 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2624 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2626 BT_DBG("chan %p", chan
);
2628 chan
->expected_tx_seq
= chan
->buffer_seq
;
2629 l2cap_seq_list_clear(&chan
->srej_list
);
2630 skb_queue_purge(&chan
->srej_q
);
2631 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2634 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2635 struct l2cap_ctrl
*control
,
2636 struct sk_buff_head
*skbs
, u8 event
)
2638 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2642 case L2CAP_EV_DATA_REQUEST
:
2643 if (chan
->tx_send_head
== NULL
)
2644 chan
->tx_send_head
= skb_peek(skbs
);
2646 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2647 l2cap_ertm_send(chan
);
2649 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2650 BT_DBG("Enter LOCAL_BUSY");
2651 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2653 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2654 /* The SREJ_SENT state must be aborted if we are to
2655 * enter the LOCAL_BUSY state.
2657 l2cap_abort_rx_srej_sent(chan
);
2660 l2cap_send_ack(chan
);
2663 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2664 BT_DBG("Exit LOCAL_BUSY");
2665 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2667 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2668 struct l2cap_ctrl local_control
;
2670 memset(&local_control
, 0, sizeof(local_control
));
2671 local_control
.sframe
= 1;
2672 local_control
.super
= L2CAP_SUPER_RR
;
2673 local_control
.poll
= 1;
2674 local_control
.reqseq
= chan
->buffer_seq
;
2675 l2cap_send_sframe(chan
, &local_control
);
2677 chan
->retry_count
= 1;
2678 __set_monitor_timer(chan
);
2679 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2682 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2683 l2cap_process_reqseq(chan
, control
->reqseq
);
2685 case L2CAP_EV_EXPLICIT_POLL
:
2686 l2cap_send_rr_or_rnr(chan
, 1);
2687 chan
->retry_count
= 1;
2688 __set_monitor_timer(chan
);
2689 __clear_ack_timer(chan
);
2690 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2692 case L2CAP_EV_RETRANS_TO
:
2693 l2cap_send_rr_or_rnr(chan
, 1);
2694 chan
->retry_count
= 1;
2695 __set_monitor_timer(chan
);
2696 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2698 case L2CAP_EV_RECV_FBIT
:
2699 /* Nothing to process */
2706 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2707 struct l2cap_ctrl
*control
,
2708 struct sk_buff_head
*skbs
, u8 event
)
2710 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2714 case L2CAP_EV_DATA_REQUEST
:
2715 if (chan
->tx_send_head
== NULL
)
2716 chan
->tx_send_head
= skb_peek(skbs
);
2717 /* Queue data, but don't send. */
2718 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2720 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2721 BT_DBG("Enter LOCAL_BUSY");
2722 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2724 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2725 /* The SREJ_SENT state must be aborted if we are to
2726 * enter the LOCAL_BUSY state.
2728 l2cap_abort_rx_srej_sent(chan
);
2731 l2cap_send_ack(chan
);
2734 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2735 BT_DBG("Exit LOCAL_BUSY");
2736 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2738 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2739 struct l2cap_ctrl local_control
;
2740 memset(&local_control
, 0, sizeof(local_control
));
2741 local_control
.sframe
= 1;
2742 local_control
.super
= L2CAP_SUPER_RR
;
2743 local_control
.poll
= 1;
2744 local_control
.reqseq
= chan
->buffer_seq
;
2745 l2cap_send_sframe(chan
, &local_control
);
2747 chan
->retry_count
= 1;
2748 __set_monitor_timer(chan
);
2749 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2752 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2753 l2cap_process_reqseq(chan
, control
->reqseq
);
2757 case L2CAP_EV_RECV_FBIT
:
2758 if (control
&& control
->final
) {
2759 __clear_monitor_timer(chan
);
2760 if (chan
->unacked_frames
> 0)
2761 __set_retrans_timer(chan
);
2762 chan
->retry_count
= 0;
2763 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2764 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2767 case L2CAP_EV_EXPLICIT_POLL
:
2770 case L2CAP_EV_MONITOR_TO
:
2771 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2772 l2cap_send_rr_or_rnr(chan
, 1);
2773 __set_monitor_timer(chan
);
2774 chan
->retry_count
++;
2776 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2784 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2785 struct sk_buff_head
*skbs
, u8 event
)
2787 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2788 chan
, control
, skbs
, event
, chan
->tx_state
);
2790 switch (chan
->tx_state
) {
2791 case L2CAP_TX_STATE_XMIT
:
2792 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2794 case L2CAP_TX_STATE_WAIT_F
:
2795 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2803 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2804 struct l2cap_ctrl
*control
)
2806 BT_DBG("chan %p, control %p", chan
, control
);
2807 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2810 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2811 struct l2cap_ctrl
*control
)
2813 BT_DBG("chan %p, control %p", chan
, control
);
2814 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2817 /* Copy frame to all raw sockets on that connection */
2818 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2820 struct sk_buff
*nskb
;
2821 struct l2cap_chan
*chan
;
2823 BT_DBG("conn %p", conn
);
2825 mutex_lock(&conn
->chan_lock
);
2827 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2828 struct sock
*sk
= chan
->sk
;
2829 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2832 /* Don't send frame to the socket it came from */
2835 nskb
= skb_clone(skb
, GFP_KERNEL
);
2839 if (chan
->ops
->recv(chan
, nskb
))
2843 mutex_unlock(&conn
->chan_lock
);
2846 /* ---- L2CAP signalling commands ---- */
2847 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2848 u8 ident
, u16 dlen
, void *data
)
2850 struct sk_buff
*skb
, **frag
;
2851 struct l2cap_cmd_hdr
*cmd
;
2852 struct l2cap_hdr
*lh
;
2855 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2856 conn
, code
, ident
, dlen
);
2858 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2861 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2862 count
= min_t(unsigned int, conn
->mtu
, len
);
2864 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2868 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2869 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2871 if (conn
->hcon
->type
== LE_LINK
)
2872 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2874 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2876 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2879 cmd
->len
= cpu_to_le16(dlen
);
2882 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2883 memcpy(skb_put(skb
, count
), data
, count
);
2889 /* Continuation fragments (no L2CAP header) */
2890 frag
= &skb_shinfo(skb
)->frag_list
;
2892 count
= min_t(unsigned int, conn
->mtu
, len
);
2894 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2898 memcpy(skb_put(*frag
, count
), data
, count
);
2903 frag
= &(*frag
)->next
;
2913 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2916 struct l2cap_conf_opt
*opt
= *ptr
;
2919 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2927 *val
= *((u8
*) opt
->val
);
2931 *val
= get_unaligned_le16(opt
->val
);
2935 *val
= get_unaligned_le32(opt
->val
);
2939 *val
= (unsigned long) opt
->val
;
2943 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2947 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2949 struct l2cap_conf_opt
*opt
= *ptr
;
2951 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2958 *((u8
*) opt
->val
) = val
;
2962 put_unaligned_le16(val
, opt
->val
);
2966 put_unaligned_le32(val
, opt
->val
);
2970 memcpy(opt
->val
, (void *) val
, len
);
2974 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2977 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2979 struct l2cap_conf_efs efs
;
2981 switch (chan
->mode
) {
2982 case L2CAP_MODE_ERTM
:
2983 efs
.id
= chan
->local_id
;
2984 efs
.stype
= chan
->local_stype
;
2985 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2986 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2987 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2988 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
2991 case L2CAP_MODE_STREAMING
:
2993 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2994 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2995 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3004 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3005 (unsigned long) &efs
);
3008 static void l2cap_ack_timeout(struct work_struct
*work
)
3010 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3014 BT_DBG("chan %p", chan
);
3016 l2cap_chan_lock(chan
);
3018 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3019 chan
->last_acked_seq
);
3022 l2cap_send_rr_or_rnr(chan
, 0);
3024 l2cap_chan_unlock(chan
);
3025 l2cap_chan_put(chan
);
3028 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3032 chan
->next_tx_seq
= 0;
3033 chan
->expected_tx_seq
= 0;
3034 chan
->expected_ack_seq
= 0;
3035 chan
->unacked_frames
= 0;
3036 chan
->buffer_seq
= 0;
3037 chan
->frames_sent
= 0;
3038 chan
->last_acked_seq
= 0;
3040 chan
->sdu_last_frag
= NULL
;
3043 skb_queue_head_init(&chan
->tx_q
);
3045 chan
->local_amp_id
= 0;
3047 chan
->move_state
= L2CAP_MOVE_STABLE
;
3048 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3050 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3053 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3054 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3056 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3057 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3058 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3060 skb_queue_head_init(&chan
->srej_q
);
3062 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3066 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3068 l2cap_seq_list_free(&chan
->srej_list
);
3073 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3076 case L2CAP_MODE_STREAMING
:
3077 case L2CAP_MODE_ERTM
:
3078 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3082 return L2CAP_MODE_BASIC
;
3086 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
3088 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3091 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
3093 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3096 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3097 struct l2cap_conf_rfc
*rfc
)
3099 if (chan
->local_amp_id
&& chan
->hs_hcon
) {
3100 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3102 /* Class 1 devices have must have ERTM timeouts
3103 * exceeding the Link Supervision Timeout. The
3104 * default Link Supervision Timeout for AMP
3105 * controllers is 10 seconds.
3107 * Class 1 devices use 0xffffffff for their
3108 * best-effort flush timeout, so the clamping logic
3109 * will result in a timeout that meets the above
3110 * requirement. ERTM timeouts are 16-bit values, so
3111 * the maximum timeout is 65.535 seconds.
3114 /* Convert timeout to milliseconds and round */
3115 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3117 /* This is the recommended formula for class 2 devices
3118 * that start ERTM timers when packets are sent to the
3121 ertm_to
= 3 * ertm_to
+ 500;
3123 if (ertm_to
> 0xffff)
3126 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3127 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3129 rfc
->retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3130 rfc
->monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3134 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3136 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3137 __l2cap_ews_supported(chan
)) {
3138 /* use extended control field */
3139 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3140 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3142 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3143 L2CAP_DEFAULT_TX_WINDOW
);
3144 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3146 chan
->ack_win
= chan
->tx_win
;
3149 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3151 struct l2cap_conf_req
*req
= data
;
3152 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3153 void *ptr
= req
->data
;
3156 BT_DBG("chan %p", chan
);
3158 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3161 switch (chan
->mode
) {
3162 case L2CAP_MODE_STREAMING
:
3163 case L2CAP_MODE_ERTM
:
3164 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3167 if (__l2cap_efs_supported(chan
))
3168 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3172 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3177 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3178 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3180 switch (chan
->mode
) {
3181 case L2CAP_MODE_BASIC
:
3182 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3183 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3186 rfc
.mode
= L2CAP_MODE_BASIC
;
3188 rfc
.max_transmit
= 0;
3189 rfc
.retrans_timeout
= 0;
3190 rfc
.monitor_timeout
= 0;
3191 rfc
.max_pdu_size
= 0;
3193 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3194 (unsigned long) &rfc
);
3197 case L2CAP_MODE_ERTM
:
3198 rfc
.mode
= L2CAP_MODE_ERTM
;
3199 rfc
.max_transmit
= chan
->max_tx
;
3201 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3203 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3204 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3206 rfc
.max_pdu_size
= cpu_to_le16(size
);
3208 l2cap_txwin_setup(chan
);
3210 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3211 L2CAP_DEFAULT_TX_WINDOW
);
3213 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3214 (unsigned long) &rfc
);
3216 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3217 l2cap_add_opt_efs(&ptr
, chan
);
3219 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3220 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3223 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3224 if (chan
->fcs
== L2CAP_FCS_NONE
||
3225 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3226 chan
->fcs
= L2CAP_FCS_NONE
;
3227 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3232 case L2CAP_MODE_STREAMING
:
3233 l2cap_txwin_setup(chan
);
3234 rfc
.mode
= L2CAP_MODE_STREAMING
;
3236 rfc
.max_transmit
= 0;
3237 rfc
.retrans_timeout
= 0;
3238 rfc
.monitor_timeout
= 0;
3240 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3241 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3243 rfc
.max_pdu_size
= cpu_to_le16(size
);
3245 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3246 (unsigned long) &rfc
);
3248 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3249 l2cap_add_opt_efs(&ptr
, chan
);
3251 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3252 if (chan
->fcs
== L2CAP_FCS_NONE
||
3253 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3254 chan
->fcs
= L2CAP_FCS_NONE
;
3255 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3261 req
->dcid
= cpu_to_le16(chan
->dcid
);
3262 req
->flags
= __constant_cpu_to_le16(0);
3267 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
3269 struct l2cap_conf_rsp
*rsp
= data
;
3270 void *ptr
= rsp
->data
;
3271 void *req
= chan
->conf_req
;
3272 int len
= chan
->conf_len
;
3273 int type
, hint
, olen
;
3275 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3276 struct l2cap_conf_efs efs
;
3278 u16 mtu
= L2CAP_DEFAULT_MTU
;
3279 u16 result
= L2CAP_CONF_SUCCESS
;
3282 BT_DBG("chan %p", chan
);
3284 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3285 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3287 hint
= type
& L2CAP_CONF_HINT
;
3288 type
&= L2CAP_CONF_MASK
;
3291 case L2CAP_CONF_MTU
:
3295 case L2CAP_CONF_FLUSH_TO
:
3296 chan
->flush_to
= val
;
3299 case L2CAP_CONF_QOS
:
3302 case L2CAP_CONF_RFC
:
3303 if (olen
== sizeof(rfc
))
3304 memcpy(&rfc
, (void *) val
, olen
);
3307 case L2CAP_CONF_FCS
:
3308 if (val
== L2CAP_FCS_NONE
)
3309 set_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
);
3312 case L2CAP_CONF_EFS
:
3314 if (olen
== sizeof(efs
))
3315 memcpy(&efs
, (void *) val
, olen
);
3318 case L2CAP_CONF_EWS
:
3320 return -ECONNREFUSED
;
3322 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3323 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3324 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3325 chan
->remote_tx_win
= val
;
3332 result
= L2CAP_CONF_UNKNOWN
;
3333 *((u8
*) ptr
++) = type
;
3338 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3341 switch (chan
->mode
) {
3342 case L2CAP_MODE_STREAMING
:
3343 case L2CAP_MODE_ERTM
:
3344 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3345 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3346 chan
->conn
->feat_mask
);
3351 if (__l2cap_efs_supported(chan
))
3352 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3354 return -ECONNREFUSED
;
3357 if (chan
->mode
!= rfc
.mode
)
3358 return -ECONNREFUSED
;
3364 if (chan
->mode
!= rfc
.mode
) {
3365 result
= L2CAP_CONF_UNACCEPT
;
3366 rfc
.mode
= chan
->mode
;
3368 if (chan
->num_conf_rsp
== 1)
3369 return -ECONNREFUSED
;
3371 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3372 (unsigned long) &rfc
);
3375 if (result
== L2CAP_CONF_SUCCESS
) {
3376 /* Configure output options and let the other side know
3377 * which ones we don't like. */
3379 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3380 result
= L2CAP_CONF_UNACCEPT
;
3383 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3385 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3388 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3389 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3390 efs
.stype
!= chan
->local_stype
) {
3392 result
= L2CAP_CONF_UNACCEPT
;
3394 if (chan
->num_conf_req
>= 1)
3395 return -ECONNREFUSED
;
3397 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3399 (unsigned long) &efs
);
3401 /* Send PENDING Conf Rsp */
3402 result
= L2CAP_CONF_PENDING
;
3403 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3408 case L2CAP_MODE_BASIC
:
3409 chan
->fcs
= L2CAP_FCS_NONE
;
3410 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3413 case L2CAP_MODE_ERTM
:
3414 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3415 chan
->remote_tx_win
= rfc
.txwin_size
;
3417 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3419 chan
->remote_max_tx
= rfc
.max_transmit
;
3421 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3422 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3423 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3424 rfc
.max_pdu_size
= cpu_to_le16(size
);
3425 chan
->remote_mps
= size
;
3427 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3429 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3431 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3432 sizeof(rfc
), (unsigned long) &rfc
);
3434 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3435 chan
->remote_id
= efs
.id
;
3436 chan
->remote_stype
= efs
.stype
;
3437 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3438 chan
->remote_flush_to
=
3439 le32_to_cpu(efs
.flush_to
);
3440 chan
->remote_acc_lat
=
3441 le32_to_cpu(efs
.acc_lat
);
3442 chan
->remote_sdu_itime
=
3443 le32_to_cpu(efs
.sdu_itime
);
3444 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3446 (unsigned long) &efs
);
3450 case L2CAP_MODE_STREAMING
:
3451 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3452 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3453 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3454 rfc
.max_pdu_size
= cpu_to_le16(size
);
3455 chan
->remote_mps
= size
;
3457 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3459 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3460 (unsigned long) &rfc
);
3465 result
= L2CAP_CONF_UNACCEPT
;
3467 memset(&rfc
, 0, sizeof(rfc
));
3468 rfc
.mode
= chan
->mode
;
3471 if (result
== L2CAP_CONF_SUCCESS
)
3472 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3474 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3475 rsp
->result
= cpu_to_le16(result
);
3476 rsp
->flags
= __constant_cpu_to_le16(0);
3481 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3482 void *data
, u16
*result
)
3484 struct l2cap_conf_req
*req
= data
;
3485 void *ptr
= req
->data
;
3488 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3489 struct l2cap_conf_efs efs
;
3491 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3493 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3494 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3497 case L2CAP_CONF_MTU
:
3498 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3499 *result
= L2CAP_CONF_UNACCEPT
;
3500 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3503 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3506 case L2CAP_CONF_FLUSH_TO
:
3507 chan
->flush_to
= val
;
3508 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3512 case L2CAP_CONF_RFC
:
3513 if (olen
== sizeof(rfc
))
3514 memcpy(&rfc
, (void *)val
, olen
);
3516 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3517 rfc
.mode
!= chan
->mode
)
3518 return -ECONNREFUSED
;
3522 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3523 sizeof(rfc
), (unsigned long) &rfc
);
3526 case L2CAP_CONF_EWS
:
3527 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3528 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3532 case L2CAP_CONF_EFS
:
3533 if (olen
== sizeof(efs
))
3534 memcpy(&efs
, (void *)val
, olen
);
3536 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3537 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3538 efs
.stype
!= chan
->local_stype
)
3539 return -ECONNREFUSED
;
3541 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3542 (unsigned long) &efs
);
3545 case L2CAP_CONF_FCS
:
3546 if (*result
== L2CAP_CONF_PENDING
)
3547 if (val
== L2CAP_FCS_NONE
)
3548 set_bit(CONF_RECV_NO_FCS
,
3554 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3555 return -ECONNREFUSED
;
3557 chan
->mode
= rfc
.mode
;
3559 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3561 case L2CAP_MODE_ERTM
:
3562 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3563 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3564 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3565 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3566 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3569 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3570 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3571 chan
->local_sdu_itime
=
3572 le32_to_cpu(efs
.sdu_itime
);
3573 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3574 chan
->local_flush_to
=
3575 le32_to_cpu(efs
.flush_to
);
3579 case L2CAP_MODE_STREAMING
:
3580 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3584 req
->dcid
= cpu_to_le16(chan
->dcid
);
3585 req
->flags
= __constant_cpu_to_le16(0);
3590 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3591 u16 result
, u16 flags
)
3593 struct l2cap_conf_rsp
*rsp
= data
;
3594 void *ptr
= rsp
->data
;
3596 BT_DBG("chan %p", chan
);
3598 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3599 rsp
->result
= cpu_to_le16(result
);
3600 rsp
->flags
= cpu_to_le16(flags
);
3605 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3607 struct l2cap_conn_rsp rsp
;
3608 struct l2cap_conn
*conn
= chan
->conn
;
3612 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3613 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3614 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3615 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3618 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3620 rsp_code
= L2CAP_CONN_RSP
;
3622 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3624 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3626 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3629 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3630 l2cap_build_conf_req(chan
, buf
), buf
);
3631 chan
->num_conf_req
++;
3634 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3638 /* Use sane default values in case a misbehaving remote device
3639 * did not send an RFC or extended window size option.
3641 u16 txwin_ext
= chan
->ack_win
;
3642 struct l2cap_conf_rfc rfc
= {
3644 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3645 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3646 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3647 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3650 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3652 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3655 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3656 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3659 case L2CAP_CONF_RFC
:
3660 if (olen
== sizeof(rfc
))
3661 memcpy(&rfc
, (void *)val
, olen
);
3663 case L2CAP_CONF_EWS
:
3670 case L2CAP_MODE_ERTM
:
3671 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3672 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3673 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3674 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3675 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3677 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3680 case L2CAP_MODE_STREAMING
:
3681 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3685 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3686 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3689 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3691 if (cmd_len
< sizeof(*rej
))
3694 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3697 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3698 cmd
->ident
== conn
->info_ident
) {
3699 cancel_delayed_work(&conn
->info_timer
);
3701 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3702 conn
->info_ident
= 0;
3704 l2cap_conn_start(conn
);
3710 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3711 struct l2cap_cmd_hdr
*cmd
,
3712 u8
*data
, u8 rsp_code
, u8 amp_id
)
3714 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3715 struct l2cap_conn_rsp rsp
;
3716 struct l2cap_chan
*chan
= NULL
, *pchan
;
3717 struct sock
*parent
, *sk
= NULL
;
3718 int result
, status
= L2CAP_CS_NO_INFO
;
3720 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3721 __le16 psm
= req
->psm
;
3723 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3725 /* Check if we have socket listening on psm */
3726 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3728 result
= L2CAP_CR_BAD_PSM
;
3734 mutex_lock(&conn
->chan_lock
);
3737 /* Check if the ACL is secure enough (if not SDP) */
3738 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3739 !hci_conn_check_link_mode(conn
->hcon
)) {
3740 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3741 result
= L2CAP_CR_SEC_BLOCK
;
3745 result
= L2CAP_CR_NO_MEM
;
3747 /* Check if we already have channel with that dcid */
3748 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3751 chan
= pchan
->ops
->new_connection(pchan
);
3757 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3758 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3761 chan
->local_amp_id
= amp_id
;
3763 __l2cap_chan_add(conn
, chan
);
3767 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3769 chan
->ident
= cmd
->ident
;
3771 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3772 if (l2cap_chan_check_security(chan
)) {
3773 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3774 __l2cap_state_change(chan
, BT_CONNECT2
);
3775 result
= L2CAP_CR_PEND
;
3776 status
= L2CAP_CS_AUTHOR_PEND
;
3777 chan
->ops
->defer(chan
);
3779 /* Force pending result for AMP controllers.
3780 * The connection will succeed after the
3781 * physical link is up.
3784 __l2cap_state_change(chan
, BT_CONNECT2
);
3785 result
= L2CAP_CR_PEND
;
3787 __l2cap_state_change(chan
, BT_CONFIG
);
3788 result
= L2CAP_CR_SUCCESS
;
3790 status
= L2CAP_CS_NO_INFO
;
3793 __l2cap_state_change(chan
, BT_CONNECT2
);
3794 result
= L2CAP_CR_PEND
;
3795 status
= L2CAP_CS_AUTHEN_PEND
;
3798 __l2cap_state_change(chan
, BT_CONNECT2
);
3799 result
= L2CAP_CR_PEND
;
3800 status
= L2CAP_CS_NO_INFO
;
3804 release_sock(parent
);
3805 mutex_unlock(&conn
->chan_lock
);
3808 rsp
.scid
= cpu_to_le16(scid
);
3809 rsp
.dcid
= cpu_to_le16(dcid
);
3810 rsp
.result
= cpu_to_le16(result
);
3811 rsp
.status
= cpu_to_le16(status
);
3812 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3814 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3815 struct l2cap_info_req info
;
3816 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3818 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3819 conn
->info_ident
= l2cap_get_ident(conn
);
3821 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3823 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3824 sizeof(info
), &info
);
3827 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3828 result
== L2CAP_CR_SUCCESS
) {
3830 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3831 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3832 l2cap_build_conf_req(chan
, buf
), buf
);
3833 chan
->num_conf_req
++;
3839 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3840 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3842 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3843 struct hci_conn
*hcon
= conn
->hcon
;
3845 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3849 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3850 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3851 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
3852 hcon
->dst_type
, 0, NULL
, 0,
3854 hci_dev_unlock(hdev
);
3856 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3860 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3861 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3864 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3865 u16 scid
, dcid
, result
, status
;
3866 struct l2cap_chan
*chan
;
3870 if (cmd_len
< sizeof(*rsp
))
3873 scid
= __le16_to_cpu(rsp
->scid
);
3874 dcid
= __le16_to_cpu(rsp
->dcid
);
3875 result
= __le16_to_cpu(rsp
->result
);
3876 status
= __le16_to_cpu(rsp
->status
);
3878 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3879 dcid
, scid
, result
, status
);
3881 mutex_lock(&conn
->chan_lock
);
3884 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3890 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3899 l2cap_chan_lock(chan
);
3902 case L2CAP_CR_SUCCESS
:
3903 l2cap_state_change(chan
, BT_CONFIG
);
3906 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3908 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3911 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3912 l2cap_build_conf_req(chan
, req
), req
);
3913 chan
->num_conf_req
++;
3917 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3921 l2cap_chan_del(chan
, ECONNREFUSED
);
3925 l2cap_chan_unlock(chan
);
3928 mutex_unlock(&conn
->chan_lock
);
3933 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3935 /* FCS is enabled only in ERTM or streaming mode, if one or both
3938 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3939 chan
->fcs
= L2CAP_FCS_NONE
;
3940 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
3941 chan
->fcs
= L2CAP_FCS_CRC16
;
3944 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3945 u8 ident
, u16 flags
)
3947 struct l2cap_conn
*conn
= chan
->conn
;
3949 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3952 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3953 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3955 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3956 l2cap_build_conf_rsp(chan
, data
,
3957 L2CAP_CONF_SUCCESS
, flags
), data
);
3960 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3961 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3964 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3967 struct l2cap_chan
*chan
;
3970 if (cmd_len
< sizeof(*req
))
3973 dcid
= __le16_to_cpu(req
->dcid
);
3974 flags
= __le16_to_cpu(req
->flags
);
3976 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3978 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3982 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3983 struct l2cap_cmd_rej_cid rej
;
3985 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3986 rej
.scid
= cpu_to_le16(chan
->scid
);
3987 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3989 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3994 /* Reject if config buffer is too small. */
3995 len
= cmd_len
- sizeof(*req
);
3996 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3997 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3998 l2cap_build_conf_rsp(chan
, rsp
,
3999 L2CAP_CONF_REJECT
, flags
), rsp
);
4004 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4005 chan
->conf_len
+= len
;
4007 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4008 /* Incomplete config. Send empty response. */
4009 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4010 l2cap_build_conf_rsp(chan
, rsp
,
4011 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4015 /* Complete config. */
4016 len
= l2cap_parse_conf_req(chan
, rsp
);
4018 l2cap_send_disconn_req(chan
, ECONNRESET
);
4022 chan
->ident
= cmd
->ident
;
4023 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4024 chan
->num_conf_rsp
++;
4026 /* Reset config buffer. */
4029 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4032 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4033 set_default_fcs(chan
);
4035 if (chan
->mode
== L2CAP_MODE_ERTM
||
4036 chan
->mode
== L2CAP_MODE_STREAMING
)
4037 err
= l2cap_ertm_init(chan
);
4040 l2cap_send_disconn_req(chan
, -err
);
4042 l2cap_chan_ready(chan
);
4047 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4049 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4050 l2cap_build_conf_req(chan
, buf
), buf
);
4051 chan
->num_conf_req
++;
4054 /* Got Conf Rsp PENDING from remote side and asume we sent
4055 Conf Rsp PENDING in the code above */
4056 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4057 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4059 /* check compatibility */
4061 /* Send rsp for BR/EDR channel */
4063 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4065 chan
->ident
= cmd
->ident
;
4069 l2cap_chan_unlock(chan
);
4073 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4074 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4077 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4078 u16 scid
, flags
, result
;
4079 struct l2cap_chan
*chan
;
4080 int len
= cmd_len
- sizeof(*rsp
);
4083 if (cmd_len
< sizeof(*rsp
))
4086 scid
= __le16_to_cpu(rsp
->scid
);
4087 flags
= __le16_to_cpu(rsp
->flags
);
4088 result
= __le16_to_cpu(rsp
->result
);
4090 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4093 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4098 case L2CAP_CONF_SUCCESS
:
4099 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4100 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4103 case L2CAP_CONF_PENDING
:
4104 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4106 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4109 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4112 l2cap_send_disconn_req(chan
, ECONNRESET
);
4116 if (!chan
->hs_hcon
) {
4117 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4120 if (l2cap_check_efs(chan
)) {
4121 amp_create_logical_link(chan
);
4122 chan
->ident
= cmd
->ident
;
4128 case L2CAP_CONF_UNACCEPT
:
4129 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4132 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4133 l2cap_send_disconn_req(chan
, ECONNRESET
);
4137 /* throw out any old stored conf requests */
4138 result
= L2CAP_CONF_SUCCESS
;
4139 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4142 l2cap_send_disconn_req(chan
, ECONNRESET
);
4146 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4147 L2CAP_CONF_REQ
, len
, req
);
4148 chan
->num_conf_req
++;
4149 if (result
!= L2CAP_CONF_SUCCESS
)
4155 l2cap_chan_set_err(chan
, ECONNRESET
);
4157 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4158 l2cap_send_disconn_req(chan
, ECONNRESET
);
4162 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4165 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4167 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4168 set_default_fcs(chan
);
4170 if (chan
->mode
== L2CAP_MODE_ERTM
||
4171 chan
->mode
== L2CAP_MODE_STREAMING
)
4172 err
= l2cap_ertm_init(chan
);
4175 l2cap_send_disconn_req(chan
, -err
);
4177 l2cap_chan_ready(chan
);
4181 l2cap_chan_unlock(chan
);
4185 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4186 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4189 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4190 struct l2cap_disconn_rsp rsp
;
4192 struct l2cap_chan
*chan
;
4195 if (cmd_len
!= sizeof(*req
))
4198 scid
= __le16_to_cpu(req
->scid
);
4199 dcid
= __le16_to_cpu(req
->dcid
);
4201 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4203 mutex_lock(&conn
->chan_lock
);
4205 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4207 mutex_unlock(&conn
->chan_lock
);
4211 l2cap_chan_lock(chan
);
4215 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4216 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4217 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4220 sk
->sk_shutdown
= SHUTDOWN_MASK
;
4223 l2cap_chan_hold(chan
);
4224 l2cap_chan_del(chan
, ECONNRESET
);
4226 l2cap_chan_unlock(chan
);
4228 chan
->ops
->close(chan
);
4229 l2cap_chan_put(chan
);
4231 mutex_unlock(&conn
->chan_lock
);
4236 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4237 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4240 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4242 struct l2cap_chan
*chan
;
4244 if (cmd_len
!= sizeof(*rsp
))
4247 scid
= __le16_to_cpu(rsp
->scid
);
4248 dcid
= __le16_to_cpu(rsp
->dcid
);
4250 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4252 mutex_lock(&conn
->chan_lock
);
4254 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4256 mutex_unlock(&conn
->chan_lock
);
4260 l2cap_chan_lock(chan
);
4262 l2cap_chan_hold(chan
);
4263 l2cap_chan_del(chan
, 0);
4265 l2cap_chan_unlock(chan
);
4267 chan
->ops
->close(chan
);
4268 l2cap_chan_put(chan
);
4270 mutex_unlock(&conn
->chan_lock
);
4275 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4276 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4279 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4282 if (cmd_len
!= sizeof(*req
))
4285 type
= __le16_to_cpu(req
->type
);
4287 BT_DBG("type 0x%4.4x", type
);
4289 if (type
== L2CAP_IT_FEAT_MASK
) {
4291 u32 feat_mask
= l2cap_feat_mask
;
4292 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4293 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4294 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4296 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4299 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4300 | L2CAP_FEAT_EXT_WINDOW
;
4302 put_unaligned_le32(feat_mask
, rsp
->data
);
4303 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4305 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4307 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4310 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4312 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4314 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4315 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4316 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4317 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4320 struct l2cap_info_rsp rsp
;
4321 rsp
.type
= cpu_to_le16(type
);
4322 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4323 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4330 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4331 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4334 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4337 if (cmd_len
< sizeof(*rsp
))
4340 type
= __le16_to_cpu(rsp
->type
);
4341 result
= __le16_to_cpu(rsp
->result
);
4343 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4345 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4346 if (cmd
->ident
!= conn
->info_ident
||
4347 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4350 cancel_delayed_work(&conn
->info_timer
);
4352 if (result
!= L2CAP_IR_SUCCESS
) {
4353 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4354 conn
->info_ident
= 0;
4356 l2cap_conn_start(conn
);
4362 case L2CAP_IT_FEAT_MASK
:
4363 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4365 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4366 struct l2cap_info_req req
;
4367 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4369 conn
->info_ident
= l2cap_get_ident(conn
);
4371 l2cap_send_cmd(conn
, conn
->info_ident
,
4372 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4374 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4375 conn
->info_ident
= 0;
4377 l2cap_conn_start(conn
);
4381 case L2CAP_IT_FIXED_CHAN
:
4382 conn
->fixed_chan_mask
= rsp
->data
[0];
4383 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4384 conn
->info_ident
= 0;
4386 l2cap_conn_start(conn
);
4393 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4394 struct l2cap_cmd_hdr
*cmd
,
4395 u16 cmd_len
, void *data
)
4397 struct l2cap_create_chan_req
*req
= data
;
4398 struct l2cap_create_chan_rsp rsp
;
4399 struct l2cap_chan
*chan
;
4400 struct hci_dev
*hdev
;
4403 if (cmd_len
!= sizeof(*req
))
4409 psm
= le16_to_cpu(req
->psm
);
4410 scid
= le16_to_cpu(req
->scid
);
4412 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4414 /* For controller id 0 make BR/EDR connection */
4415 if (req
->amp_id
== HCI_BREDR_ID
) {
4416 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4421 /* Validate AMP controller id */
4422 hdev
= hci_dev_get(req
->amp_id
);
4426 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4431 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4434 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4435 struct hci_conn
*hs_hcon
;
4437 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
, conn
->dst
);
4443 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4445 mgr
->bredr_chan
= chan
;
4446 chan
->hs_hcon
= hs_hcon
;
4447 chan
->fcs
= L2CAP_FCS_NONE
;
4448 conn
->mtu
= hdev
->block_mtu
;
4457 rsp
.scid
= cpu_to_le16(scid
);
4458 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4459 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4461 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4467 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4469 struct l2cap_move_chan_req req
;
4472 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4474 ident
= l2cap_get_ident(chan
->conn
);
4475 chan
->ident
= ident
;
4477 req
.icid
= cpu_to_le16(chan
->scid
);
4478 req
.dest_amp_id
= dest_amp_id
;
4480 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4483 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4486 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4488 struct l2cap_move_chan_rsp rsp
;
4490 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4492 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4493 rsp
.result
= cpu_to_le16(result
);
4495 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4499 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4501 struct l2cap_move_chan_cfm cfm
;
4503 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4505 chan
->ident
= l2cap_get_ident(chan
->conn
);
4507 cfm
.icid
= cpu_to_le16(chan
->scid
);
4508 cfm
.result
= cpu_to_le16(result
);
4510 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4513 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4516 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4518 struct l2cap_move_chan_cfm cfm
;
4520 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4522 cfm
.icid
= cpu_to_le16(icid
);
4523 cfm
.result
= __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4525 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4529 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4532 struct l2cap_move_chan_cfm_rsp rsp
;
4534 BT_DBG("icid 0x%4.4x", icid
);
4536 rsp
.icid
= cpu_to_le16(icid
);
4537 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4540 static void __release_logical_link(struct l2cap_chan
*chan
)
4542 chan
->hs_hchan
= NULL
;
4543 chan
->hs_hcon
= NULL
;
4545 /* Placeholder - release the logical link */
4548 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4550 /* Logical link setup failed */
4551 if (chan
->state
!= BT_CONNECTED
) {
4552 /* Create channel failure, disconnect */
4553 l2cap_send_disconn_req(chan
, ECONNRESET
);
4557 switch (chan
->move_role
) {
4558 case L2CAP_MOVE_ROLE_RESPONDER
:
4559 l2cap_move_done(chan
);
4560 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4562 case L2CAP_MOVE_ROLE_INITIATOR
:
4563 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4564 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4565 /* Remote has only sent pending or
4566 * success responses, clean up
4568 l2cap_move_done(chan
);
4571 /* Other amp move states imply that the move
4572 * has already aborted
4574 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4579 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4580 struct hci_chan
*hchan
)
4582 struct l2cap_conf_rsp rsp
;
4584 chan
->hs_hchan
= hchan
;
4585 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4587 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4589 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4592 set_default_fcs(chan
);
4594 err
= l2cap_ertm_init(chan
);
4596 l2cap_send_disconn_req(chan
, -err
);
4598 l2cap_chan_ready(chan
);
4602 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4603 struct hci_chan
*hchan
)
4605 chan
->hs_hcon
= hchan
->conn
;
4606 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4608 BT_DBG("move_state %d", chan
->move_state
);
4610 switch (chan
->move_state
) {
4611 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4612 /* Move confirm will be sent after a success
4613 * response is received
4615 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4617 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4618 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4619 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4620 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4621 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4622 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4623 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4624 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4625 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4629 /* Move was not in expected state, free the channel */
4630 __release_logical_link(chan
);
4632 chan
->move_state
= L2CAP_MOVE_STABLE
;
4636 /* Call with chan locked */
4637 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4640 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4643 l2cap_logical_fail(chan
);
4644 __release_logical_link(chan
);
4648 if (chan
->state
!= BT_CONNECTED
) {
4649 /* Ignore logical link if channel is on BR/EDR */
4650 if (chan
->local_amp_id
)
4651 l2cap_logical_finish_create(chan
, hchan
);
4653 l2cap_logical_finish_move(chan
, hchan
);
4657 void l2cap_move_start(struct l2cap_chan
*chan
)
4659 BT_DBG("chan %p", chan
);
4661 if (chan
->local_amp_id
== HCI_BREDR_ID
) {
4662 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4664 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4665 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4666 /* Placeholder - start physical link setup */
4668 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4669 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4671 l2cap_move_setup(chan
);
4672 l2cap_send_move_chan_req(chan
, 0);
4676 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4677 u8 local_amp_id
, u8 remote_amp_id
)
4679 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4680 local_amp_id
, remote_amp_id
);
4682 chan
->fcs
= L2CAP_FCS_NONE
;
4684 /* Outgoing channel on AMP */
4685 if (chan
->state
== BT_CONNECT
) {
4686 if (result
== L2CAP_CR_SUCCESS
) {
4687 chan
->local_amp_id
= local_amp_id
;
4688 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4690 /* Revert to BR/EDR connect */
4691 l2cap_send_conn_req(chan
);
4697 /* Incoming channel on AMP */
4698 if (__l2cap_no_conn_pending(chan
)) {
4699 struct l2cap_conn_rsp rsp
;
4701 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4702 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4704 if (result
== L2CAP_CR_SUCCESS
) {
4705 /* Send successful response */
4706 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
4707 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4709 /* Send negative response */
4710 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4711 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4714 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4717 if (result
== L2CAP_CR_SUCCESS
) {
4718 __l2cap_state_change(chan
, BT_CONFIG
);
4719 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4720 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4722 l2cap_build_conf_req(chan
, buf
), buf
);
4723 chan
->num_conf_req
++;
4728 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4731 l2cap_move_setup(chan
);
4732 chan
->move_id
= local_amp_id
;
4733 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4735 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4738 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4740 struct hci_chan
*hchan
= NULL
;
4742 /* Placeholder - get hci_chan for logical link */
4745 if (hchan
->state
== BT_CONNECTED
) {
4746 /* Logical link is ready to go */
4747 chan
->hs_hcon
= hchan
->conn
;
4748 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4749 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4750 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4752 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4754 /* Wait for logical link to be ready */
4755 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4758 /* Logical link not available */
4759 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4763 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4765 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4767 if (result
== -EINVAL
)
4768 rsp_result
= L2CAP_MR_BAD_ID
;
4770 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4772 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4775 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4776 chan
->move_state
= L2CAP_MOVE_STABLE
;
4778 /* Restart data transmission */
4779 l2cap_ertm_send(chan
);
4782 /* Invoke with locked chan */
4783 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4785 u8 local_amp_id
= chan
->local_amp_id
;
4786 u8 remote_amp_id
= chan
->remote_amp_id
;
4788 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4789 chan
, result
, local_amp_id
, remote_amp_id
);
4791 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4792 l2cap_chan_unlock(chan
);
4796 if (chan
->state
!= BT_CONNECTED
) {
4797 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4798 } else if (result
!= L2CAP_MR_SUCCESS
) {
4799 l2cap_do_move_cancel(chan
, result
);
4801 switch (chan
->move_role
) {
4802 case L2CAP_MOVE_ROLE_INITIATOR
:
4803 l2cap_do_move_initiate(chan
, local_amp_id
,
4806 case L2CAP_MOVE_ROLE_RESPONDER
:
4807 l2cap_do_move_respond(chan
, result
);
4810 l2cap_do_move_cancel(chan
, result
);
4816 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4817 struct l2cap_cmd_hdr
*cmd
,
4818 u16 cmd_len
, void *data
)
4820 struct l2cap_move_chan_req
*req
= data
;
4821 struct l2cap_move_chan_rsp rsp
;
4822 struct l2cap_chan
*chan
;
4824 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4826 if (cmd_len
!= sizeof(*req
))
4829 icid
= le16_to_cpu(req
->icid
);
4831 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4836 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4838 rsp
.icid
= cpu_to_le16(icid
);
4839 rsp
.result
= __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4840 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4845 chan
->ident
= cmd
->ident
;
4847 if (chan
->scid
< L2CAP_CID_DYN_START
||
4848 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4849 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4850 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4851 result
= L2CAP_MR_NOT_ALLOWED
;
4852 goto send_move_response
;
4855 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4856 result
= L2CAP_MR_SAME_ID
;
4857 goto send_move_response
;
4860 if (req
->dest_amp_id
) {
4861 struct hci_dev
*hdev
;
4862 hdev
= hci_dev_get(req
->dest_amp_id
);
4863 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4864 !test_bit(HCI_UP
, &hdev
->flags
)) {
4868 result
= L2CAP_MR_BAD_ID
;
4869 goto send_move_response
;
4874 /* Detect a move collision. Only send a collision response
4875 * if this side has "lost", otherwise proceed with the move.
4876 * The winner has the larger bd_addr.
4878 if ((__chan_is_moving(chan
) ||
4879 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4880 bacmp(conn
->src
, conn
->dst
) > 0) {
4881 result
= L2CAP_MR_COLLISION
;
4882 goto send_move_response
;
4885 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4886 l2cap_move_setup(chan
);
4887 chan
->move_id
= req
->dest_amp_id
;
4890 if (!req
->dest_amp_id
) {
4891 /* Moving to BR/EDR */
4892 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4893 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4894 result
= L2CAP_MR_PEND
;
4896 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4897 result
= L2CAP_MR_SUCCESS
;
4900 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4901 /* Placeholder - uncomment when amp functions are available */
4902 /*amp_accept_physical(chan, req->dest_amp_id);*/
4903 result
= L2CAP_MR_PEND
;
4907 l2cap_send_move_chan_rsp(chan
, result
);
4909 l2cap_chan_unlock(chan
);
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
		l2cap_move_continue(conn, icid, result);
	else
		l2cap_move_fail(conn, cmd->ident, icid, result);

	return 0;
}
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (!chan->local_amp_id)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (!chan->local_amp_id && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
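/* Worked example of the checks above (illustrative numbers): min/max are
 * connection intervals in 1.25 ms units, latency is in connection events
 * and to_multiplier is the supervision timeout in 10 ms units, so
 * to_multiplier * 8 expresses the timeout in the same 1.25 ms units as
 * max.  With max = 80 (100 ms) and to_multiplier = 100 (1 s), the
 * "max >= to_multiplier * 8" test passes (80 < 800) and the slave latency
 * may then be at most (100 * 8 / 80) - 1 = 9 connection events.
 */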
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u8 *data)
{
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		return 0;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		return l2cap_conn_param_update_req(conn, cmd, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		return 0;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		return -EINVAL;
	}
}
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_le_sig_cmd(conn, &cmd, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
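/* Note on the FCS check above: by the time it runs, the basic L2CAP header
 * and the control field have already been pulled, so skb->data points hdr_size
 * bytes into the frame.  "skb->data - hdr_size" rewinds to the start of the
 * basic header, meaning the locally computed CRC-16 covers header, control
 * field and payload, matching what the sender computed over the whole PDU.
 * The two trailing FCS bytes are trimmed off first and read back with
 * get_unaligned_le16() for the comparison.
 */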
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
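/* Example of the SAR flow handled above (illustrative sizes, not taken from
 * the original source): an SDU larger than the PDU payload arrives as a
 * "start" fragment carrying the 2-byte total SDU length, zero or more
 * "continue" fragments and one "end" fragment.  For instance, a 1000-byte
 * SDU with an MPS around 672 would typically arrive as a start fragment
 * (sdu_len == 1000) followed by an end fragment with the remaining bytes;
 * the fragments are chained with append_skb_frag() and handed to
 * chan->ops->recv() only once chan->sdu->len equals sdu_len.
 */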
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}

void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}

		return L2CAP_TXSEQ_EXPECTED;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */
		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
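/* Illustrative numbers for the window checks above: with the standard
 * enhanced control field, sequence numbers are 6 bits, so tx_win_max is 63
 * and (tx_win_max + 1) >> 1 is 32; with the extended control field they
 * are 14 bits, giving 16383 and 8192.  As long as the negotiated tx_win is
 * at or below half of the sequence space, a txseq falling outside the
 * window can only be a stale retransmission and is safe to ignore, which
 * is why L2CAP_TXSEQ_INVALID_IGNORE is returned instead of forcing a
 * disconnect.
 */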
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err = 0;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}
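/* Example of the reqseq validation above, assuming the 64-value sequence
 * space of the standard control field: with next_tx_seq == 5 and
 * expected_ack_seq == 2 there are 3 unacked frames, so reqseq values 2..5
 * (offsets 3..0 from next_tx_seq) are accepted, while reqseq == 7 yields a
 * modular offset of 62 and is rejected.
 */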
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
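/* The rx_func_to_event[] table above relies on the S-frame "super" field
 * encoding from the L2CAP spec: 0 = RR (receiver ready), 1 = REJ (reject),
 * 2 = RNR (receiver not ready), 3 = SREJ (selective reject).  Indexing the
 * table with control->super therefore maps each supervisory frame straight
 * onto the corresponding L2CAP_EV_RECV_* state machine event.
 */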
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
/* ---- L2CAP interface with lower layer (HCI) ---- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (!status) {
		conn = l2cap_conn_add(hcon);
		if (conn)
			l2cap_conn_ready(conn);
	} else {
		l2cap_conn_del(hcon, bt_to_errno(status));
	}
}
int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}

static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *l2cap_debugfs;

int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
						    NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;
}

void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");