Linux 3.11-rc3
[cris-mirror.git] / net / bluetooth / l2cap_core.c
blob8c3499bec89319289073b2f1e7f3f202cd179ca6
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
/* Module parameter: when set, Enhanced Retransmission Mode and Streaming
 * Mode are not advertised or negotiated (see l2cap_mode_supported()).
 */
43 bool disable_ertm;

/* Locally supported L2CAP feature mask and fixed-channel bitmap
 * reported in Information Responses.
 */
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of every L2CAP channel in the system, guarded by
 * chan_list_lock (read-mostly, hence an rwlock).
 */
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for helpers defined later in this file. */
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
66 struct l2cap_chan *c;
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
72 return NULL;
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
78 struct l2cap_chan *c;
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
84 return NULL;
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
92 struct l2cap_chan *c;
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
100 return c;
103 /* Find channel with given DCID.
104 * Returns locked channel.
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 u16 cid)
109 struct l2cap_chan *c;
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
117 return c;
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
123 struct l2cap_chan *c;
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
129 return NULL;
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 u8 ident)
135 struct l2cap_chan *c;
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
139 if (c)
140 l2cap_chan_lock(c);
141 mutex_unlock(&conn->chan_lock);
143 return c;
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 return c;
154 return NULL;
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
159 int err;
161 write_lock(&chan_list_lock);
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 err = -EADDRINUSE;
165 goto done;
168 if (psm) {
169 chan->psm = psm;
170 chan->sport = psm;
171 err = 0;
172 } else {
173 u16 p;
175 err = -EINVAL;
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
180 err = 0;
181 break;
185 done:
186 write_unlock(&chan_list_lock);
187 return err;
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
192 write_lock(&chan_list_lock);
194 chan->scid = scid;
196 write_unlock(&chan_list_lock);
198 return 0;
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
203 u16 cid = L2CAP_CID_DYN_START;
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
210 return 0;
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
218 chan->state = state;
219 chan->ops->state_change(chan, state);
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
224 struct sock *sk = chan->sk;
226 lock_sock(sk);
227 __l2cap_state_change(chan, state);
228 release_sock(sk);
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
233 struct sock *sk = chan->sk;
235 sk->sk_err = err;
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
240 struct sock *sk = chan->sk;
242 lock_sock(sk);
243 __l2cap_chan_set_err(chan, err);
244 release_sock(sk);
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
268 struct sk_buff *skb;
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
275 return NULL;
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
 */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
309 return 0;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 u16 seq)
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from @seq_list and return it, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty or @seq is not found. Head removal is O(1); removal
 * from the middle walks the singly-linked chain.
 */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removed the last element: reset both ends to empty */
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
340 } else {
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* If the tail was removed, the walker's predecessor becomes the tail */
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
355 return seq;
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
366 u16 i;
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 return;
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
389 else
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with an
 * error reason derived from its current state. Takes conn->chan_lock and
 * the channel lock; drops the timer's channel reference on exit.
 */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 chan_timer.work);
400 struct l2cap_conn *conn = chan->conn;
401 int reason;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
/* Pick an errno that reflects what the timeout interrupted */
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
413 else
414 reason = ETIMEDOUT;
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
/* ops->close may sleep; called after dropping the channel lock */
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
/* Release the reference taken when the timer was armed */
423 l2cap_chan_put(chan);
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
451 return chan;
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
464 kfree(chan);
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
471 kref_get(&c->kref);
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign source/destination CIDs and MTU according
 * to the channel type, initialize best-effort flow-spec defaults, take a
 * channel reference and an hci_conn reference, and link the channel onto
 * the connection's channel list. Caller must hold conn->chan_lock (see
 * l2cap_chan_add() for the locked wrapper).
 */
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
/* Default disconnect reason until something more specific happens */
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
500 chan->conn = conn;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
505 /* LE connection */
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 if (chan->dcid == L2CAP_CID_ATT)
508 chan->scid = L2CAP_CID_ATT;
509 else
510 chan->scid = l2cap_alloc_cid(conn);
511 } else {
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 chan->omtu = L2CAP_DEFAULT_MTU;
516 break;
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
523 break;
525 case L2CAP_CHAN_CONN_FIX_A2MP:
526 chan->scid = L2CAP_CID_A2MP;
527 chan->dcid = L2CAP_CID_A2MP;
528 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
530 break;
532 default:
533 /* Raw socket can send/recv signalling messages only */
534 chan->scid = L2CAP_CID_SIGNALING;
535 chan->dcid = L2CAP_CID_SIGNALING;
536 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort flow-spec defaults used until EFS is negotiated */
539 chan->local_id = L2CAP_BESTEFFORT_ID;
540 chan->local_stype = L2CAP_SERV_BESTEFFORT;
541 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
542 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
543 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
544 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* The connection's list holds a reference; dropped in l2cap_chan_del() */
546 l2cap_chan_hold(chan);
548 hci_conn_hold(conn->hcon);
550 list_add(&chan->list, &conn->chan_l);
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
555 mutex_lock(&conn->chan_lock);
556 __l2cap_chan_add(conn, chan);
557 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down with error @err:
 * unlinks it from the connection list (dropping the list's reference and
 * the hci_conn reference), disconnects any AMP logical link, notifies the
 * owner via ops->teardown, and for ERTM/Streaming modes releases all
 * queued frames, timers and sequence lists.
 */
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
562 struct l2cap_conn *conn = chan->conn;
564 __clear_chan_timer(chan);
566 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
568 if (conn) {
569 struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 /* Delete from channel list */
571 list_del(&chan->list);
/* Drop the reference taken in __l2cap_chan_add() */
573 l2cap_chan_put(chan);
575 chan->conn = NULL;
/* A2MP fixed channels never took an hci_conn reference */
577 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 hci_conn_drop(conn->hcon);
580 if (mgr && mgr->bredr_chan == chan)
581 mgr->bredr_chan = NULL;
584 if (chan->hs_hchan) {
585 struct hci_chan *hs_hchan = chan->hs_hchan;
587 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 amp_disconnect_logical_link(hs_hchan);
591 chan->ops->teardown(chan, err);
/* Configuration never completed: nothing mode-specific to release */
593 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
594 return;
596 switch(chan->mode) {
597 case L2CAP_MODE_BASIC:
598 break;
600 case L2CAP_MODE_ERTM:
601 __clear_retrans_timer(chan);
602 __clear_monitor_timer(chan);
603 __clear_ack_timer(chan);
605 skb_queue_purge(&chan->srej_q);
607 l2cap_seq_list_free(&chan->srej_list);
608 l2cap_seq_list_free(&chan->retrans_list);
610 /* fall through */
612 case L2CAP_MODE_STREAMING:
613 skb_queue_purge(&chan->tx_q);
614 break;
617 return;
/* Close @chan with error @reason, driving the proper shutdown sequence
 * for its current state: a disconnect request for established ACL
 * channels, a connect response rejection for half-open incoming
 * channels, or an immediate teardown otherwise. Caller holds the
 * channel lock.
 */
620 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
622 struct l2cap_conn *conn = chan->conn;
623 struct sock *sk = chan->sk;
625 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
626 sk);
628 switch (chan->state) {
629 case BT_LISTEN:
630 chan->ops->teardown(chan, 0);
631 break;
633 case BT_CONNECTED:
634 case BT_CONFIG:
/* Connection-oriented BR/EDR channels get a graceful disconnect;
 * everything else is torn down immediately.
 */
635 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
636 conn->hcon->type == ACL_LINK) {
637 __set_chan_timer(chan, sk->sk_sndtimeo);
638 l2cap_send_disconn_req(chan, reason);
639 } else
640 l2cap_chan_del(chan, reason);
641 break;
643 case BT_CONNECT2:
/* Incoming connection still pending our response: reject it */
644 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
645 conn->hcon->type == ACL_LINK) {
646 struct l2cap_conn_rsp rsp;
647 __u16 result;
649 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
650 result = L2CAP_CR_SEC_BLOCK;
651 else
652 result = L2CAP_CR_BAD_PSM;
653 l2cap_state_change(chan, BT_DISCONN);
/* Note: scid/dcid are from the remote's point of view */
655 rsp.scid = cpu_to_le16(chan->dcid);
656 rsp.dcid = cpu_to_le16(chan->scid);
657 rsp.result = cpu_to_le16(result);
658 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
659 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
660 sizeof(rsp), &rsp);
663 l2cap_chan_del(chan, reason);
664 break;
666 case BT_CONNECT:
667 case BT_DISCONN:
668 l2cap_chan_del(chan, reason);
669 break;
671 default:
672 chan->ops->teardown(chan, 0);
673 break;
677 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
679 if (chan->chan_type == L2CAP_CHAN_RAW) {
680 switch (chan->sec_level) {
681 case BT_SECURITY_HIGH:
682 return HCI_AT_DEDICATED_BONDING_MITM;
683 case BT_SECURITY_MEDIUM:
684 return HCI_AT_DEDICATED_BONDING;
685 default:
686 return HCI_AT_NO_BONDING;
688 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
689 if (chan->sec_level == BT_SECURITY_LOW)
690 chan->sec_level = BT_SECURITY_SDP;
692 if (chan->sec_level == BT_SECURITY_HIGH)
693 return HCI_AT_NO_BONDING_MITM;
694 else
695 return HCI_AT_NO_BONDING;
696 } else {
697 switch (chan->sec_level) {
698 case BT_SECURITY_HIGH:
699 return HCI_AT_GENERAL_BONDING_MITM;
700 case BT_SECURITY_MEDIUM:
701 return HCI_AT_GENERAL_BONDING;
702 default:
703 return HCI_AT_NO_BONDING;
708 /* Service level security */
709 int l2cap_chan_check_security(struct l2cap_chan *chan)
711 struct l2cap_conn *conn = chan->conn;
712 __u8 auth_type;
714 auth_type = l2cap_get_auth_type(chan);
716 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
719 static u8 l2cap_get_ident(struct l2cap_conn *conn)
721 u8 id;
723 /* Get next available identificator.
724 * 1 - 128 are used by kernel.
725 * 129 - 199 are reserved.
726 * 200 - 254 are used by utilities like l2ping, etc.
729 spin_lock(&conn->lock);
731 if (++conn->tx_ident > 128)
732 conn->tx_ident = 1;
734 id = conn->tx_ident;
736 spin_unlock(&conn->lock);
738 return id;
741 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
742 void *data)
744 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
745 u8 flags;
747 BT_DBG("code 0x%2.2x", code);
749 if (!skb)
750 return;
752 if (lmp_no_flush_capable(conn->hcon->hdev))
753 flags = ACL_START_NO_FLUSH;
754 else
755 flags = ACL_START;
757 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
758 skb->priority = HCI_PRIO_MAX;
760 hci_send_acl(conn->hchan, skb, flags);
763 static bool __chan_is_moving(struct l2cap_chan *chan)
765 return chan->move_state != L2CAP_MOVE_STABLE &&
766 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
769 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
771 struct hci_conn *hcon = chan->conn->hcon;
772 u16 flags;
774 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
775 skb->priority);
777 if (chan->hs_hcon && !__chan_is_moving(chan)) {
778 if (chan->hs_hchan)
779 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
780 else
781 kfree_skb(skb);
783 return;
786 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
787 lmp_no_flush_capable(hcon->hdev))
788 flags = ACL_START_NO_FLUSH;
789 else
790 flags = ACL_START;
792 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
793 hci_send_acl(chan->conn->hchan, skb, flags);
796 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
798 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
799 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
801 if (enh & L2CAP_CTRL_FRAME_TYPE) {
802 /* S-Frame */
803 control->sframe = 1;
804 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
805 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
807 control->sar = 0;
808 control->txseq = 0;
809 } else {
810 /* I-Frame */
811 control->sframe = 0;
812 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
813 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
815 control->poll = 0;
816 control->super = 0;
820 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
822 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
823 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
825 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
826 /* S-Frame */
827 control->sframe = 1;
828 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
829 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
831 control->sar = 0;
832 control->txseq = 0;
833 } else {
834 /* I-Frame */
835 control->sframe = 0;
836 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
837 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
839 control->poll = 0;
840 control->super = 0;
844 static inline void __unpack_control(struct l2cap_chan *chan,
845 struct sk_buff *skb)
847 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
848 __unpack_extended_control(get_unaligned_le32(skb->data),
849 &bt_cb(skb)->control);
850 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
851 } else {
852 __unpack_enhanced_control(get_unaligned_le16(skb->data),
853 &bt_cb(skb)->control);
854 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
858 static u32 __pack_extended_control(struct l2cap_ctrl *control)
860 u32 packed;
862 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
863 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
865 if (control->sframe) {
866 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
867 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
868 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
869 } else {
870 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
871 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
874 return packed;
877 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
879 u16 packed;
881 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
882 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
884 if (control->sframe) {
885 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
886 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
887 packed |= L2CAP_CTRL_FRAME_TYPE;
888 } else {
889 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
890 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
893 return packed;
896 static inline void __pack_control(struct l2cap_chan *chan,
897 struct l2cap_ctrl *control,
898 struct sk_buff *skb)
900 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
901 put_unaligned_le32(__pack_extended_control(control),
902 skb->data + L2CAP_HDR_SIZE);
903 } else {
904 put_unaligned_le16(__pack_enhanced_control(control),
905 skb->data + L2CAP_HDR_SIZE);
909 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
911 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
912 return L2CAP_EXT_HDR_SIZE;
913 else
914 return L2CAP_ENH_HDR_SIZE;
917 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
918 u32 control)
920 struct sk_buff *skb;
921 struct l2cap_hdr *lh;
922 int hlen = __ertm_hdr_size(chan);
924 if (chan->fcs == L2CAP_FCS_CRC16)
925 hlen += L2CAP_FCS_SIZE;
927 skb = bt_skb_alloc(hlen, GFP_KERNEL);
929 if (!skb)
930 return ERR_PTR(-ENOMEM);
932 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
933 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
934 lh->cid = cpu_to_le16(chan->dcid);
936 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
937 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
938 else
939 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
941 if (chan->fcs == L2CAP_FCS_CRC16) {
942 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
943 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
946 skb->priority = HCI_PRIO_MAX;
947 return skb;
/* Send the supervisory frame described by @control on @chan, updating
 * ERTM bookkeeping: the pending F-bit, the RNR-sent flag, and the last
 * acknowledged sequence number / ack timer for acknowledging frames.
 */
950 static void l2cap_send_sframe(struct l2cap_chan *chan,
951 struct l2cap_ctrl *control)
953 struct sk_buff *skb;
954 u32 control_field;
956 BT_DBG("chan %p, control %p", chan, control)
958 if (!control->sframe)
959 return;
/* No supervisory traffic while an AMP channel move is in progress */
961 if (__chan_is_moving(chan))
962 return;
/* A queued F-bit is sent on the next non-poll S-frame */
964 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
965 !control->poll)
966 control->final = 1;
968 if (control->super == L2CAP_SUPER_RR)
969 clear_bit(CONN_RNR_SENT, &chan->conn_state);
970 else if (control->super == L2CAP_SUPER_RNR)
971 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR/REJ acknowledge reqseq; SREJ does not, so the ack timer and
 * last_acked_seq are only updated for the former.
 */
973 if (control->super != L2CAP_SUPER_SREJ) {
974 chan->last_acked_seq = control->reqseq;
975 __clear_ack_timer(chan);
978 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
979 control->final, control->poll, control->super);
981 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
982 control_field = __pack_extended_control(control);
983 else
984 control_field = __pack_enhanced_control(control);
986 skb = l2cap_create_sframe_pdu(chan, control_field);
987 if (!IS_ERR(skb))
988 l2cap_do_send(chan, skb);
991 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
993 struct l2cap_ctrl control;
995 BT_DBG("chan %p, poll %d", chan, poll);
997 memset(&control, 0, sizeof(control));
998 control.sframe = 1;
999 control.poll = poll;
1001 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1002 control.super = L2CAP_SUPER_RNR;
1003 else
1004 control.super = L2CAP_SUPER_RR;
1006 control.reqseq = chan->buffer_seq;
1007 l2cap_send_sframe(chan, &control);
1010 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1012 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1015 static bool __amp_capable(struct l2cap_chan *chan)
1017 struct l2cap_conn *conn = chan->conn;
1019 if (enable_hs &&
1020 hci_amp_capable() &&
1021 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1022 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1023 return true;
1024 else
1025 return false;
1028 static bool l2cap_check_efs(struct l2cap_chan *chan)
1030 /* Check EFS parameters */
1031 return true;
1034 void l2cap_send_conn_req(struct l2cap_chan *chan)
1036 struct l2cap_conn *conn = chan->conn;
1037 struct l2cap_conn_req req;
1039 req.scid = cpu_to_le16(chan->scid);
1040 req.psm = chan->psm;
1042 chan->ident = l2cap_get_ident(conn);
1044 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1046 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1049 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1051 struct l2cap_create_chan_req req;
1052 req.scid = cpu_to_le16(chan->scid);
1053 req.psm = chan->psm;
1054 req.amp_id = amp_id;
1056 chan->ident = l2cap_get_ident(chan->conn);
1058 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1059 sizeof(req), &req);
/* Prepare an ERTM channel for an AMP channel move: stop all timers, mark
 * already-sent frames for retransmission, reset SREJ/retransmit
 * bookkeeping, and park the TX/RX state machines. No-op for non-ERTM
 * channels.
 */
1062 static void l2cap_move_setup(struct l2cap_chan *chan)
1064 struct sk_buff *skb;
1066 BT_DBG("chan %p", chan);
1068 if (chan->mode != L2CAP_MODE_ERTM)
1069 return;
1071 __clear_retrans_timer(chan);
1072 __clear_monitor_timer(chan);
1073 __clear_ack_timer(chan);
1075 chan->retry_count = 0;
/* Frames already transmitted at least once get one retry after the
 * move; the walk stops at the first never-sent frame.
 */
1076 skb_queue_walk(&chan->tx_q, skb) {
1077 if (bt_cb(skb)->control.retries)
1078 bt_cb(skb)->control.retries = 1;
1079 else
1080 break;
1083 chan->expected_tx_seq = chan->buffer_seq;
1085 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1086 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1087 l2cap_seq_list_clear(&chan->retrans_list);
1088 l2cap_seq_list_clear(&chan->srej_list);
1089 skb_queue_purge(&chan->srej_q);
1091 chan->tx_state = L2CAP_TX_STATE_XMIT;
1092 chan->rx_state = L2CAP_RX_STATE_MOVE;
/* Suppress transmissions until the move completes */
1094 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1097 static void l2cap_move_done(struct l2cap_chan *chan)
1099 u8 move_role = chan->move_role;
1100 BT_DBG("chan %p", chan);
1102 chan->move_state = L2CAP_MOVE_STABLE;
1103 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1105 if (chan->mode != L2CAP_MODE_ERTM)
1106 return;
1108 switch (move_role) {
1109 case L2CAP_MOVE_ROLE_INITIATOR:
1110 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1111 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1112 break;
1113 case L2CAP_MOVE_ROLE_RESPONDER:
1114 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1115 break;
1119 static void l2cap_chan_ready(struct l2cap_chan *chan)
1121 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1122 chan->conf_state = 0;
1123 __clear_chan_timer(chan);
1125 chan->state = BT_CONNECTED;
1127 chan->ops->ready(chan);
/* Begin establishing @chan: discover AMP controllers first when the
 * channel prefers high speed, otherwise send a plain connect request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
		return;
	}

	l2cap_send_conn_req(chan);
}
/* Kick off channel establishment on @chan's connection. LE links become
 * ready immediately; BR/EDR links first require the feature-mask
 * information exchange and a passing security check before the connect
 * request is sent.
 */
1140 static void l2cap_do_start(struct l2cap_chan *chan)
1142 struct l2cap_conn *conn = chan->conn;
1144 if (conn->hcon->type == LE_LINK) {
1145 l2cap_chan_ready(chan);
1146 return;
/* Feature exchange already initiated: either wait for it to finish
 * or proceed to connect when security allows and nothing is pending.
 */
1149 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1150 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1151 return;
1153 if (l2cap_chan_check_security(chan) &&
1154 __l2cap_no_conn_pending(chan)) {
1155 l2cap_start_connection(chan);
1157 } else {
/* First channel on this connection: request the feature mask and
 * start the information-request timeout.
 */
1158 struct l2cap_info_req req;
1159 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1161 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1162 conn->info_ident = l2cap_get_ident(conn);
1164 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1166 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1167 sizeof(req), &req);
1171 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1173 u32 local_feat_mask = l2cap_feat_mask;
1174 if (!disable_ertm)
1175 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1177 switch (mode) {
1178 case L2CAP_MODE_ERTM:
1179 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1180 case L2CAP_MODE_STREAMING:
1181 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1182 default:
1183 return 0x00;
/* Send an L2CAP Disconnection Request for @chan, stop its ERTM timers,
 * move it to BT_DISCONN and record @err on the socket. A2MP fixed
 * channels only change state - no request is sent for them.
 */
1187 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1189 struct sock *sk = chan->sk;
1190 struct l2cap_conn *conn = chan->conn;
1191 struct l2cap_disconn_req req;
1193 if (!conn)
1194 return;
/* Stop all ERTM activity before tearing the channel down */
1196 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1197 __clear_retrans_timer(chan);
1198 __clear_monitor_timer(chan);
1199 __clear_ack_timer(chan);
1202 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1203 l2cap_state_change(chan, BT_DISCONN);
1204 return;
1207 req.dcid = cpu_to_le16(chan->dcid);
1208 req.scid = cpu_to_le16(chan->scid);
1209 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1210 sizeof(req), &req);
/* State change and error are set atomically under the socket lock */
1212 lock_sock(sk);
1213 __l2cap_state_change(chan, BT_DISCONN);
1214 __l2cap_chan_set_err(chan, err);
1215 release_sock(sk);
1218 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward once the
 * feature exchange has completed: outgoing channels in BT_CONNECT send
 * their connect request (or are closed if their mode is unsupported),
 * and incoming channels in BT_CONNECT2 are answered with success,
 * pending, or an authentication-pending response.
 */
1219 static void l2cap_conn_start(struct l2cap_conn *conn)
1221 struct l2cap_chan *chan, *tmp;
1223 BT_DBG("conn %p", conn);
1225 mutex_lock(&conn->chan_lock);
/* _safe: l2cap_chan_close() below may unlink the current entry */
1227 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1228 struct sock *sk = chan->sk;
1230 l2cap_chan_lock(chan);
1232 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1233 l2cap_chan_unlock(chan);
1234 continue;
1237 if (chan->state == BT_CONNECT) {
1238 if (!l2cap_chan_check_security(chan) ||
1239 !__l2cap_no_conn_pending(chan)) {
1240 l2cap_chan_unlock(chan);
1241 continue;
/* State-2 device with an unsupported mode cannot fall back
 * to Basic mode: close it rather than connect.
 */
1244 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1245 && test_bit(CONF_STATE2_DEVICE,
1246 &chan->conf_state)) {
1247 l2cap_chan_close(chan, ECONNRESET);
1248 l2cap_chan_unlock(chan);
1249 continue;
1252 l2cap_start_connection(chan);
1254 } else if (chan->state == BT_CONNECT2) {
1255 struct l2cap_conn_rsp rsp;
1256 char buf[128];
1257 rsp.scid = cpu_to_le16(chan->dcid);
1258 rsp.dcid = cpu_to_le16(chan->scid);
1260 if (l2cap_chan_check_security(chan)) {
1261 lock_sock(sk);
/* Deferred setup: report pending and let the owner accept */
1262 if (test_bit(BT_SK_DEFER_SETUP,
1263 &bt_sk(sk)->flags)) {
1264 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1265 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1266 chan->ops->defer(chan);
1268 } else {
1269 __l2cap_state_change(chan, BT_CONFIG);
1270 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1271 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1273 release_sock(sk);
1274 } else {
1275 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1276 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1279 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1280 sizeof(rsp), &rsp);
/* Only successful responses proceed to configuration, and only
 * if we have not already sent a config request.
 */
1282 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1283 rsp.result != L2CAP_CR_SUCCESS) {
1284 l2cap_chan_unlock(chan);
1285 continue;
1288 set_bit(CONF_REQ_SENT, &chan->conf_state);
1289 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1290 l2cap_build_conf_req(chan, buf), buf);
1291 chan->num_conf_req++;
1294 l2cap_chan_unlock(chan);
1297 mutex_unlock(&conn->chan_lock);
/* Find a channel with matching source CID and source/destination bdaddr.
 * Returns an exact match if one exists, otherwise the closest match
 * (wildcard BDADDR_ANY on either address). The returned channel is NOT
 * locked and no reference is taken; the caller must cope with that.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match: one side exact, other wildcard */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
/* An LE link just came up: if a socket is listening on the ATT fixed
 * channel, spawn a new child channel for this connection and attach it.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Ask the socket layer for a child channel of the listener */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	bacpy(&bt_sk(chan->sk)->src, conn->src);
	bacpy(&bt_sk(chan->sk)->dst, conn->dst);

	__l2cap_chan_add(conn, chan);

clean:
	release_sock(parent);
}
/* The underlying HCI link is fully established: kick every channel on
 * this connection forward. LE channels wait on SMP security, fixed/raw
 * channels go straight to BT_CONNECTED, and connection-oriented BR/EDR
 * channels in BT_CONNECT start the L2CAP connect procedure.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed elsewhere */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* Channel is ready once link security is in place */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/raw channels need no connect handshake */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1427 /* Notify sockets that we cannot guaranty reliability anymore */
1428 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1430 struct l2cap_chan *chan;
1432 BT_DBG("conn %p", conn);
1434 mutex_lock(&conn->chan_lock);
1436 list_for_each_entry(chan, &conn->chan_l, list) {
1437 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1438 l2cap_chan_set_err(chan, err);
1441 mutex_unlock(&conn->chan_lock);
1444 static void l2cap_info_timeout(struct work_struct *work)
1446 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1447 info_timer.work);
1449 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1450 conn->info_ident = 0;
1452 l2cap_conn_start(conn);
1456 * l2cap_user
1457 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1458 * callback is called during registration. The ->remove callback is called
1459 * during unregistration.
1460 * An l2cap_user object can either be explicitly unregistered or when the
1461 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1462 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1463 * External modules must own a reference to the l2cap_conn object if they intend
1464 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1465 * any time if they don't.
1468 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1470 struct hci_dev *hdev = conn->hcon->hdev;
1471 int ret;
1473 /* We need to check whether l2cap_conn is registered. If it is not, we
1474 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1475 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1476 * relies on the parent hci_conn object to be locked. This itself relies
1477 * on the hci_dev object to be locked. So we must lock the hci device
1478 * here, too. */
1480 hci_dev_lock(hdev);
1482 if (user->list.next || user->list.prev) {
1483 ret = -EINVAL;
1484 goto out_unlock;
1487 /* conn->hchan is NULL after l2cap_conn_del() was called */
1488 if (!conn->hchan) {
1489 ret = -ENODEV;
1490 goto out_unlock;
1493 ret = user->probe(conn, user);
1494 if (ret)
1495 goto out_unlock;
1497 list_add(&user->list, &conn->users);
1498 ret = 0;
1500 out_unlock:
1501 hci_dev_unlock(hdev);
1502 return ret;
1504 EXPORT_SYMBOL(l2cap_register_user);
1506 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1508 struct hci_dev *hdev = conn->hcon->hdev;
1510 hci_dev_lock(hdev);
1512 if (!user->list.next || !user->list.prev)
1513 goto out_unlock;
1515 list_del(&user->list);
1516 user->list.next = NULL;
1517 user->list.prev = NULL;
1518 user->remove(conn, user);
1520 out_unlock:
1521 hci_dev_unlock(hdev);
1523 EXPORT_SYMBOL(l2cap_unregister_user);
1525 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1527 struct l2cap_user *user;
1529 while (!list_empty(&conn->users)) {
1530 user = list_first_entry(&conn->users, struct l2cap_user, list);
1531 list_del(&user->list);
1532 user->list.next = NULL;
1533 user->list.prev = NULL;
1534 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon: notify users, close
 * every channel with @err, release the HCI channel, stop pending timers
 * and drop the connection's self-reference. After this, conn->hchan is
 * NULL, which l2cap_register_user() uses to detect the dead connection.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ->close() can run after it has been
		 * unlinked by l2cap_chan_del().
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* If SMP pairing was still pending, stop its timer and free the
	 * SMP context (the flag clear is atomic vs. security_timeout()).
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1584 static void security_timeout(struct work_struct *work)
1586 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1587 security_timer.work);
1589 BT_DBG("conn %p", conn);
1591 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1592 smp_chan_destroy(conn);
1593 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon. Allocates the
 * HCI channel and the connection object, takes a reference on the
 * hci_conn, and initializes locks, lists and the per-link-type timer.
 * Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Paired with the hci_conn_put() in l2cap_conn_free() */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through: le_mtu == 0 means use the ACL MTU */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links use the SMP security timeout; BR/EDR links use the
	 * information-request timeout. The two share storage.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
/* kref release callback: drop our hci_conn reference (taken in
 * l2cap_conn_add()) and free the connection object itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
/* Take a reference on @conn; release with l2cap_conn_put(). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find a channel with matching PSM and source/destination bdaddr.
 * Exact address match wins; otherwise the closest match (wildcard
 * BDADDR_ANY on either side) is returned. The result is neither locked
 * nor referenced. Mirrors l2cap_global_chan_by_scid() but keys on PSM.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
/* Connect @chan to PSM @psm (or fixed CID @cid) at @dst. Validates the
 * PSM/CID and channel mode/state, creates or reuses the HCI link
 * (LE or ACL depending on @dst_type), attaches the channel to the
 * connection and moves it to BT_CONNECT. Returns 0 on success (also
 * when a connect is already in progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, so the channel lock must
	 * be dropped around it to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* The HCI link may already be up (e.g. shared with another
	 * channel); in that case proceed immediately.
	 */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
/* Sleep (interruptibly, releasing the socket lock while asleep) until
 * every outstanding ERTM I-frame has been acknowledged or the channel
 * loses its connection. Returns 0 on success, a socket error, or the
 * signal errno if interrupted. Caller holds the socket lock.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the polling interval after each expiry */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock so the RX path can process acks */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1889 static void l2cap_monitor_timeout(struct work_struct *work)
1891 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1892 monitor_timer.work);
1894 BT_DBG("chan %p", chan);
1896 l2cap_chan_lock(chan);
1898 if (!chan->conn) {
1899 l2cap_chan_unlock(chan);
1900 l2cap_chan_put(chan);
1901 return;
1904 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1906 l2cap_chan_unlock(chan);
1907 l2cap_chan_put(chan);
1910 static void l2cap_retrans_timeout(struct work_struct *work)
1912 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1913 retrans_timer.work);
1915 BT_DBG("chan %p", chan);
1917 l2cap_chan_lock(chan);
1919 if (!chan->conn) {
1920 l2cap_chan_unlock(chan);
1921 l2cap_chan_put(chan);
1922 return;
1925 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1926 l2cap_chan_unlock(chan);
1927 l2cap_chan_put(chan);
/* Transmit all queued PDUs in streaming mode: number each frame with
 * the next TX sequence, append the FCS if configured, and send. In
 * streaming mode nothing is retained for retransmission, so the TX
 * queue drains completely.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Don't send while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges, reqseq is always 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
/* Transmit pending ERTM I-frames from tx_send_head while the remote TX
 * window has room and the TX state machine is in XMIT. Each frame is
 * sequenced, FCS-protected if configured, and sent as a clone so the
 * original stays queued for possible retransmission. Returns the number
 * of frames sent, 0 if sending is currently not possible, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer signalled receiver-not-ready; hold off */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* Don't send while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each outgoing I-frame also acknowledges what we received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
/* Retransmit every sequence number queued on chan->retrans_list. Each
 * frame's control field is refreshed (current reqseq, F-bit if owed),
 * the FCS is recomputed, and the frame is resent from a private copy so
 * the queued original stays intact. Disconnects the channel if any
 * frame exceeds its retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* Don't send while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means retry forever */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute FCS over the updated control field */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
/* Retransmit the single frame the peer requested via control->reqseq
 * (SREJ handling): queue that sequence number and run the resend loop.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
/* Retransmit every unacked frame starting at control->reqseq (REJ or
 * poll-response handling). The first walk locates the requested
 * sequence number in the TX queue; the second queues everything from
 * there up to (but excluding) tx_send_head for retransmission.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll obliges us to set the F-bit on the next frame out */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the frame with txseq == reqseq (or stop at the
		 * first not-yet-sent frame).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue from there through the last sent frame */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
/* Acknowledge received I-frames. If we are locally busy, send RNR.
 * Otherwise try to piggyback the ack on pending I-frames; if frames
 * still need acking and the window is 3/4 full, send an explicit RR
 * now, else (re)arm the ack timer to batch the acknowledgement.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending (RNR) */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack to batch it */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
/* Copy @len bytes of user data from @msg into @skb, placing the first
 * @count bytes directly in @skb and the remainder in MTU-sized
 * continuation fragments chained on skb's frag_list. Returns the number
 * of bytes consumed, -EFAULT on a bad user copy, or the allocator's
 * error. On failure the caller is expected to free @skb (which frees
 * the partial frag chain with it).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2256 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2257 struct msghdr *msg, size_t len,
2258 u32 priority)
2260 struct l2cap_conn *conn = chan->conn;
2261 struct sk_buff *skb;
2262 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2263 struct l2cap_hdr *lh;
2265 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2267 count = min_t(unsigned int, (conn->mtu - hlen), len);
2269 skb = chan->ops->alloc_skb(chan, count + hlen,
2270 msg->msg_flags & MSG_DONTWAIT);
2271 if (IS_ERR(skb))
2272 return skb;
2274 skb->priority = priority;
2276 /* Create L2CAP header */
2277 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2278 lh->cid = cpu_to_le16(chan->dcid);
2279 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2280 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2282 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2283 if (unlikely(err < 0)) {
2284 kfree_skb(skb);
2285 return ERR_PTR(err);
2287 return skb;
2290 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2291 struct msghdr *msg, size_t len,
2292 u32 priority)
2294 struct l2cap_conn *conn = chan->conn;
2295 struct sk_buff *skb;
2296 int err, count;
2297 struct l2cap_hdr *lh;
2299 BT_DBG("chan %p len %zu", chan, len);
2301 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2303 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2304 msg->msg_flags & MSG_DONTWAIT);
2305 if (IS_ERR(skb))
2306 return skb;
2308 skb->priority = priority;
2310 /* Create L2CAP header */
2311 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2312 lh->cid = cpu_to_le16(chan->dcid);
2313 lh->len = cpu_to_le16(len);
2315 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2316 if (unlikely(err < 0)) {
2317 kfree_skb(skb);
2318 return ERR_PTR(err);
2320 return skb;
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at send time), optional SDU length (@sdulen != 0 for
 * the first segment of a segmented SDU), payload, and room reserved for
 * the FCS if enabled. Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Enhanced or extended control field, per channel flags */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
/* Segment an SDU of @len bytes from @msg into I-frame PDUs queued on
 * @seg_queue. A short SDU becomes a single UNSEGMENTED frame; longer
 * ones become START (carrying the SDU length) / CONTINUE / END frames.
 * Returns 0 on success or a negative errno (with @seg_queue purged).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* START frames also carry the 2-byte SDU length */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the START frame carries the SDU length;
			 * later frames regain that room for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
/* Send user data on @chan according to its mode: connectionless
 * channels and basic mode build one PDU and send it directly; ERTM and
 * streaming modes segment the SDU first and hand the segments to the
 * mode machinery. Returns the number of bytes queued or a negative
 * errno (-EMSGSIZE if @len exceeds the outgoing MTU, -ENOTCONN if the
 * channel dropped out of BT_CONNECTED during segmentation, ...).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
/* Send an SREJ S-frame for every sequence number between the expected
 * TX sequence and @txseq that has not already been received out of
 * order, remembering each on srej_list. Then advance expected_tx_seq
 * past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already buffered in srej_q need no SREJ */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2550 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2552 struct l2cap_ctrl control;
2554 BT_DBG("chan %p", chan);
2556 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2557 return;
2559 memset(&control, 0, sizeof(control));
2560 control.sframe = 1;
2561 control.super = L2CAP_SUPER_SREJ;
2562 control.reqseq = chan->srej_list.tail;
2563 l2cap_send_sframe(chan, &control);
/* Re-send an SREJ for every sequence number still on srej_list up to
 * (but excluding) @txseq, re-appending each so the list is preserved.
 * The saved initial head bounds the loop to a single pass even though
 * entries are pushed back while iterating.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
/* Process an incoming acknowledgement: @reqseq acknowledges every frame
 * with a txseq before it, so drop those frames from the TX queue and
 * update the unacked count. Stops the retransmission timer once nothing
 * is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing new acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2624 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2626 BT_DBG("chan %p", chan);
2628 chan->expected_tx_seq = chan->buffer_seq;
2629 l2cap_seq_list_clear(&chan->srej_list);
2630 skb_queue_purge(&chan->srej_q);
2631 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine: XMIT state event handler.
 *
 * In XMIT the channel may freely send I-frames.  Events either queue
 * and transmit data, toggle local-busy handling, consume received
 * acknowledgments, or poll the peer (moving to WAIT_F until the peer
 * answers with the Final bit).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember where to resume sending if the queue was empty */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Tell the peer we are busy (RNR is sent while LOCAL_BUSY) */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We previously told the peer we were busy; send an
			 * RR with the Poll bit and wait for the Final bit.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Release I-frames acknowledged by the received ReqSeq */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2706 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2707 struct l2cap_ctrl *control,
2708 struct sk_buff_head *skbs, u8 event)
2710 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2711 event);
2713 switch (event) {
2714 case L2CAP_EV_DATA_REQUEST:
2715 if (chan->tx_send_head == NULL)
2716 chan->tx_send_head = skb_peek(skbs);
2717 /* Queue data, but don't send. */
2718 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2719 break;
2720 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2721 BT_DBG("Enter LOCAL_BUSY");
2722 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2724 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2725 /* The SREJ_SENT state must be aborted if we are to
2726 * enter the LOCAL_BUSY state.
2728 l2cap_abort_rx_srej_sent(chan);
2731 l2cap_send_ack(chan);
2733 break;
2734 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2735 BT_DBG("Exit LOCAL_BUSY");
2736 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2738 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2739 struct l2cap_ctrl local_control;
2740 memset(&local_control, 0, sizeof(local_control));
2741 local_control.sframe = 1;
2742 local_control.super = L2CAP_SUPER_RR;
2743 local_control.poll = 1;
2744 local_control.reqseq = chan->buffer_seq;
2745 l2cap_send_sframe(chan, &local_control);
2747 chan->retry_count = 1;
2748 __set_monitor_timer(chan);
2749 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2751 break;
2752 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2753 l2cap_process_reqseq(chan, control->reqseq);
2755 /* Fall through */
2757 case L2CAP_EV_RECV_FBIT:
2758 if (control && control->final) {
2759 __clear_monitor_timer(chan);
2760 if (chan->unacked_frames > 0)
2761 __set_retrans_timer(chan);
2762 chan->retry_count = 0;
2763 chan->tx_state = L2CAP_TX_STATE_XMIT;
2764 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2766 break;
2767 case L2CAP_EV_EXPLICIT_POLL:
2768 /* Ignore */
2769 break;
2770 case L2CAP_EV_MONITOR_TO:
2771 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2772 l2cap_send_rr_or_rnr(chan, 1);
2773 __set_monitor_timer(chan);
2774 chan->retry_count++;
2775 } else {
2776 l2cap_send_disconn_req(chan, ECONNABORTED);
2778 break;
2779 default:
2780 break;
2784 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2785 struct sk_buff_head *skbs, u8 event)
2787 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2788 chan, control, skbs, event, chan->tx_state);
2790 switch (chan->tx_state) {
2791 case L2CAP_TX_STATE_XMIT:
2792 l2cap_tx_state_xmit(chan, control, skbs, event);
2793 break;
2794 case L2CAP_TX_STATE_WAIT_F:
2795 l2cap_tx_state_wait_f(chan, control, skbs, event);
2796 break;
2797 default:
2798 /* Ignore event */
2799 break;
/* Feed the ReqSeq (and F-bit) of a received frame into the TX state
 * machine so acknowledged I-frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
/* Feed only the F-bit of a received frame into the TX state machine
 * (no ReqSeq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2817 /* Copy frame to all raw sockets on that connection */
2818 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2820 struct sk_buff *nskb;
2821 struct l2cap_chan *chan;
2823 BT_DBG("conn %p", conn);
2825 mutex_lock(&conn->chan_lock);
2827 list_for_each_entry(chan, &conn->chan_l, list) {
2828 struct sock *sk = chan->sk;
2829 if (chan->chan_type != L2CAP_CHAN_RAW)
2830 continue;
2832 /* Don't send frame to the socket it came from */
2833 if (skb->sk == sk)
2834 continue;
2835 nskb = skb_clone(skb, GFP_KERNEL);
2836 if (!nskb)
2837 continue;
2839 if (chan->ops->recv(chan, nskb))
2840 kfree_skb(nskb);
2843 mutex_unlock(&conn->chan_lock);
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command PDU: L2CAP header + command header + payload.
 *
 * The PDU is addressed to the signalling channel (LE or BR/EDR, chosen
 * by the link type).  If the payload does not fit in one buffer of
 * conn->mtu bytes, the remainder is chained as continuation fragments
 * on skb_shinfo(skb)->frag_list; fragments carry no L2CAP header.
 *
 * Returns the skb (owned by the caller) or NULL on allocation failure,
 * or when the MTU cannot even hold the two headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First buffer carries whatever payload fits after the
		 * two headers.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including any fragments already added */
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * Outputs the option type and length.  Options of length 1/2/4 are
 * returned by value in *val; any other length returns a pointer to
 * the raw option payload, cast into the unsigned long.
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * here against the bytes remaining in the buffer; in the pointer case
 * callers must validate olen before copying from *val.  Verify every
 * call site bounds-checks olen.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Non-scalar size: hand back a pointer to the raw data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option to the buffer at *ptr and advance
 * *ptr past it.  Lengths 1/2/4 encode val as a little-endian scalar;
 * any other length treats val as a pointer to len bytes of raw data.
 *
 * NOTE(review): there is no bound on the output buffer - the caller
 * must guarantee *ptr has room for L2CAP_CONF_OPT_SIZE + len bytes
 * (callers in this file use fixed 64/128-byte stacks buffers; verify
 * each one cannot overflow).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2977 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2979 struct l2cap_conf_efs efs;
2981 switch (chan->mode) {
2982 case L2CAP_MODE_ERTM:
2983 efs.id = chan->local_id;
2984 efs.stype = chan->local_stype;
2985 efs.msdu = cpu_to_le16(chan->local_msdu);
2986 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2987 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2988 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2989 break;
2991 case L2CAP_MODE_STREAMING:
2992 efs.id = 1;
2993 efs.stype = L2CAP_SERV_BESTEFFORT;
2994 efs.msdu = cpu_to_le16(chan->local_msdu);
2995 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2996 efs.acc_lat = 0;
2997 efs.flush_to = 0;
2998 break;
3000 default:
3001 return;
3004 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3005 (unsigned long) &efs);
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * If any received I-frames have not been acknowledged yet (buffer_seq
 * has advanced past last_acked_seq), send an acknowledgment S-frame
 * via l2cap_send_rr_or_rnr.  Drops the channel reference that was
 * taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Reset all sequence/retransmission state for a channel entering
 * ERTM or streaming mode.
 *
 * Non-ERTM channels only get the common counters and queues reset.
 * For ERTM, additionally initializes the RX/TX state machines, the
 * retransmission/monitor/ack delayed work items, and the SREJ and
 * retransmission sequence lists.
 *
 * Returns 0 on success or a negative errno if a sequence list cannot
 * be allocated (in which case nothing is left allocated).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on the BR/EDR controller */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* If the second list fails, free the first so nothing leaks */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3073 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3075 switch (mode) {
3076 case L2CAP_MODE_STREAMING:
3077 case L2CAP_MODE_ERTM:
3078 if (l2cap_mode_supported(mode, remote_feat_mask))
3079 return mode;
3080 /* fall through */
3081 default:
3082 return L2CAP_MODE_BASIC;
3086 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
3088 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3091 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
3093 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the retransmission and monitor timeouts of an RFC option.
 *
 * On an AMP (high-speed) link the timeouts are derived from the
 * controller's best-effort flush timeout; on BR/EDR the spec defaults
 * are used.  Values are stored little-endian in *rfc.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC timeout fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3134 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3136 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3137 __l2cap_ews_supported(chan)) {
3138 /* use extended control field */
3139 set_bit(FLAG_EXT_CTRL, &chan->flags);
3140 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3141 } else {
3142 chan->tx_win = min_t(u16, chan->tx_win,
3143 L2CAP_DEFAULT_TX_WINDOW);
3144 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3146 chan->ack_win = chan->tx_win;
/* Build a Configuration Request payload for this channel into data.
 *
 * On the first request the channel mode may be downgraded based on the
 * remote feature mask; subsequent requests keep the negotiated mode.
 * Depending on the mode this emits MTU, RFC, EFS, EWS and FCS options.
 *
 * Returns the number of bytes written.
 *
 * NOTE(review): output size is not bounded here - callers in this file
 * pass 128-byte stack buffers; verify the worst-case option set fits.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC if the peer knows
		 * about ERTM or streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full frame (headers + FCS) fits
		 * within the connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Extended windows carry the real size in an EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
/* Parse the peer's buffered Configuration Request (chan->conf_req) and
 * build our Configuration Response into data.
 *
 * Returns the response length, or -ECONNREFUSED when the configuration
 * cannot be accepted at all.
 *
 * Fix: every option length (olen) is now validated before the option
 * value is used.  Previously a malformed EFS option (wrong length) set
 * remote_efs while leaving the stack variable `efs` uninitialized; the
 * uninitialized bytes were later compared and echoed back to the peer
 * in the response (remote-triggered stack data disclosure).  The same
 * missing checks allowed short MTU/FLUSH_TO/RFC/FCS/EWS options to be
 * consumed as if well-formed.  Options with a bad length are ignored,
 * matching the upstream hardening of this parser.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only trust efs contents when the length matches;
			 * remote_efs must not be set otherwise.
			 */
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown non-hints are
			 * reported back in the response.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3481 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3482 void *data, u16 *result)
3484 struct l2cap_conf_req *req = data;
3485 void *ptr = req->data;
3486 int type, olen;
3487 unsigned long val;
3488 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3489 struct l2cap_conf_efs efs;
3491 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3493 while (len >= L2CAP_CONF_OPT_SIZE) {
3494 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3496 switch (type) {
3497 case L2CAP_CONF_MTU:
3498 if (val < L2CAP_DEFAULT_MIN_MTU) {
3499 *result = L2CAP_CONF_UNACCEPT;
3500 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3501 } else
3502 chan->imtu = val;
3503 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3504 break;
3506 case L2CAP_CONF_FLUSH_TO:
3507 chan->flush_to = val;
3508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3509 2, chan->flush_to);
3510 break;
3512 case L2CAP_CONF_RFC:
3513 if (olen == sizeof(rfc))
3514 memcpy(&rfc, (void *)val, olen);
3516 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3517 rfc.mode != chan->mode)
3518 return -ECONNREFUSED;
3520 chan->fcs = 0;
3522 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3523 sizeof(rfc), (unsigned long) &rfc);
3524 break;
3526 case L2CAP_CONF_EWS:
3527 chan->ack_win = min_t(u16, val, chan->ack_win);
3528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3529 chan->tx_win);
3530 break;
3532 case L2CAP_CONF_EFS:
3533 if (olen == sizeof(efs))
3534 memcpy(&efs, (void *)val, olen);
3536 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3537 efs.stype != L2CAP_SERV_NOTRAFIC &&
3538 efs.stype != chan->local_stype)
3539 return -ECONNREFUSED;
3541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3542 (unsigned long) &efs);
3543 break;
3545 case L2CAP_CONF_FCS:
3546 if (*result == L2CAP_CONF_PENDING)
3547 if (val == L2CAP_FCS_NONE)
3548 set_bit(CONF_RECV_NO_FCS,
3549 &chan->conf_state);
3550 break;
3554 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3555 return -ECONNREFUSED;
3557 chan->mode = rfc.mode;
3559 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3560 switch (rfc.mode) {
3561 case L2CAP_MODE_ERTM:
3562 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3563 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3564 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3565 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3566 chan->ack_win = min_t(u16, chan->ack_win,
3567 rfc.txwin_size);
3569 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3570 chan->local_msdu = le16_to_cpu(efs.msdu);
3571 chan->local_sdu_itime =
3572 le32_to_cpu(efs.sdu_itime);
3573 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3574 chan->local_flush_to =
3575 le32_to_cpu(efs.flush_to);
3577 break;
3579 case L2CAP_MODE_STREAMING:
3580 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3584 req->dcid = cpu_to_le16(chan->dcid);
3585 req->flags = __constant_cpu_to_le16(0);
3587 return ptr - data;
3590 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3591 u16 result, u16 flags)
3593 struct l2cap_conf_rsp *rsp = data;
3594 void *ptr = rsp->data;
3596 BT_DBG("chan %p", chan);
3598 rsp->scid = cpu_to_le16(chan->dcid);
3599 rsp->result = cpu_to_le16(result);
3600 rsp->flags = cpu_to_le16(flags);
3602 return ptr - data;
/* Send the deferred success response for a connection that was held in
 * a pending state (e.g. deferred setup), then kick off configuration.
 *
 * Uses a Create Channel Response on AMP (high-speed) links and a plain
 * Connection Response otherwise.  The initial Configuration Request is
 * sent only once per channel (guarded by CONF_REQ_SENT).
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Config request already sent - nothing more to do */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3634 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3636 int type, olen;
3637 unsigned long val;
3638 /* Use sane default values in case a misbehaving remote device
3639 * did not send an RFC or extended window size option.
3641 u16 txwin_ext = chan->ack_win;
3642 struct l2cap_conf_rfc rfc = {
3643 .mode = chan->mode,
3644 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3645 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3646 .max_pdu_size = cpu_to_le16(chan->imtu),
3647 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3650 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3652 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3653 return;
3655 while (len >= L2CAP_CONF_OPT_SIZE) {
3656 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3658 switch (type) {
3659 case L2CAP_CONF_RFC:
3660 if (olen == sizeof(rfc))
3661 memcpy(&rfc, (void *)val, olen);
3662 break;
3663 case L2CAP_CONF_EWS:
3664 txwin_ext = val;
3665 break;
3669 switch (rfc.mode) {
3670 case L2CAP_MODE_ERTM:
3671 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3672 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3673 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3674 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3675 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3676 else
3677 chan->ack_win = min_t(u16, chan->ack_win,
3678 rfc.txwin_size);
3679 break;
3680 case L2CAP_MODE_STREAMING:
3681 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3685 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3686 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3687 u8 *data)
3689 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3691 if (cmd_len < sizeof(*rej))
3692 return -EPROTO;
3694 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3695 return 0;
3697 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3698 cmd->ident == conn->info_ident) {
3699 cancel_delayed_work(&conn->info_timer);
3701 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3702 conn->info_ident = 0;
3704 l2cap_conn_start(conn);
3707 return 0;
/* Handle an incoming Connection Request (or AMP Create Channel Request).
 *
 * Looks up a listening channel on the requested PSM, performs security
 * checks, creates the new child channel and replies with rsp_code.
 * Depending on security/deferred-setup/AMP state the result is either
 * success or pending.  May also trigger the initial feature-mask
 * Information Request and the first Configuration Request.
 *
 * Returns the new channel, or NULL when the request was refused.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace will accept/reject later */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet - answer pending for now */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off feature-mask discovery on this connection */
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
/* Handle an incoming L2CAP Connect Request signalling command.
 *
 * Notifies the management interface that the remote device is connected
 * (at most once per link), then delegates channel creation and the
 * Connect Response to l2cap_connect().
 *
 * Returns 0 on success or -EPROTO if the command payload is truncated.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	/* Validate the length before touching the payload. */
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* test_and_set_bit() makes the mgmt event fire only once. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	/* amp_id 0: plain BR/EDR channel, answered with L2CAP_CONN_RSP. */
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
/* Common handler for Connect Response and Create Channel Response.
 *
 * Finds the local channel either by the source CID the peer echoed back
 * or, when scid is 0 (e.g. the request was refused), by the signalling
 * ident of our outstanding request, then advances the channel state
 * machine according to the result code.
 *
 * Returns 0 on success, -EPROTO for a truncated command, or -EFAULT if
 * no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No scid in the response: match by signalling ident. */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send our Configure Request only once per channel. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3933 static inline void set_default_fcs(struct l2cap_chan *chan)
3935 /* FCS is enabled only in ERTM or streaming mode, if one or both
3936 * sides request it.
3938 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3939 chan->fcs = L2CAP_FCS_NONE;
3940 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3941 chan->fcs = L2CAP_FCS_CRC16;
3944 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3945 u8 ident, u16 flags)
3947 struct l2cap_conn *conn = chan->conn;
3949 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3950 flags);
3952 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3953 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3955 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3956 l2cap_build_conf_rsp(chan, data,
3957 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configure Request.
 *
 * Accumulates (possibly multi-fragment) option data into chan->conf_req,
 * parses it once complete, sends the Configure Response, and — when both
 * directions are configured — finishes channel setup.
 *
 * Returns 0 on success, -EPROTO for a truncated command, -ENOENT if the
 * destination CID does not match a channel.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config.
	 * NOTE(review): l2cap_parse_conf_req() writes its output into the
	 * 64-byte rsp[] buffer; confirm it bounds its output internally.
	 */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not been sent yet: send it now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP channel: defer the response until the
			 * logical link completes.
			 */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4073 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4074 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4075 u8 *data)
4077 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4078 u16 scid, flags, result;
4079 struct l2cap_chan *chan;
4080 int len = cmd_len - sizeof(*rsp);
4081 int err = 0;
4083 if (cmd_len < sizeof(*rsp))
4084 return -EPROTO;
4086 scid = __le16_to_cpu(rsp->scid);
4087 flags = __le16_to_cpu(rsp->flags);
4088 result = __le16_to_cpu(rsp->result);
4090 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4091 result, len);
4093 chan = l2cap_get_chan_by_scid(conn, scid);
4094 if (!chan)
4095 return 0;
4097 switch (result) {
4098 case L2CAP_CONF_SUCCESS:
4099 l2cap_conf_rfc_get(chan, rsp->data, len);
4100 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4101 break;
4103 case L2CAP_CONF_PENDING:
4104 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4106 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4107 char buf[64];
4109 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4110 buf, &result);
4111 if (len < 0) {
4112 l2cap_send_disconn_req(chan, ECONNRESET);
4113 goto done;
4116 if (!chan->hs_hcon) {
4117 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4119 } else {
4120 if (l2cap_check_efs(chan)) {
4121 amp_create_logical_link(chan);
4122 chan->ident = cmd->ident;
4126 goto done;
4128 case L2CAP_CONF_UNACCEPT:
4129 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4130 char req[64];
4132 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4133 l2cap_send_disconn_req(chan, ECONNRESET);
4134 goto done;
4137 /* throw out any old stored conf requests */
4138 result = L2CAP_CONF_SUCCESS;
4139 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4140 req, &result);
4141 if (len < 0) {
4142 l2cap_send_disconn_req(chan, ECONNRESET);
4143 goto done;
4146 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4147 L2CAP_CONF_REQ, len, req);
4148 chan->num_conf_req++;
4149 if (result != L2CAP_CONF_SUCCESS)
4150 goto done;
4151 break;
4154 default:
4155 l2cap_chan_set_err(chan, ECONNRESET);
4157 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4158 l2cap_send_disconn_req(chan, ECONNRESET);
4159 goto done;
4162 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4163 goto done;
4165 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4167 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4168 set_default_fcs(chan);
4170 if (chan->mode == L2CAP_MODE_ERTM ||
4171 chan->mode == L2CAP_MODE_STREAMING)
4172 err = l2cap_ertm_init(chan);
4174 if (err < 0)
4175 l2cap_send_disconn_req(chan, -err);
4176 else
4177 l2cap_chan_ready(chan);
4180 done:
4181 l2cap_chan_unlock(chan);
4182 return err;
/* Handle an incoming Disconnect Request: acknowledge it, shut the
 * backing socket down, and delete the channel.
 *
 * Returns 0 (an unknown CID is silently acknowledged as "no channel"),
 * or -EPROTO for a malformed command.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold keeps chan alive across del/close until the final put. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle an incoming Disconnect Response: the peer confirmed our
 * disconnect, so delete the channel (error 0 = orderly shutdown).
 *
 * Returns 0, or -EPROTO for a malformed command.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold keeps chan alive across del/close until the final put. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle an incoming Information Request.
 *
 * Answers feature-mask and fixed-channel queries based on the module's
 * disable_ertm / enable_hs settings; any other type gets NOTSUPP.
 *
 * Returns 0, or -EPROTO for a malformed command.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Response = 4-byte info_rsp header + 4-byte feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Response = 4-byte header + 8-byte fixed channel map. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Advertise the A2MP fixed channel only when HS is on. */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
/* Handle an incoming Information Response.
 *
 * Info exchanges are not bound to a channel, so the response is matched
 * against conn->info_ident. A feature-mask answer may trigger a
 * follow-up fixed-channel query; once the exchange completes, pending
 * channel setups are resumed via l2cap_conn_start().
 *
 * Returns 0, or -EPROTO for a truncated command.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: treat the exchange as finished anyway. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query. */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
/* Handle an incoming Create Channel Request (AMP).
 *
 * Controller id 0 falls back to an ordinary BR/EDR connect; otherwise
 * the AMP controller id is validated and, on success, the new channel
 * is bound to the AMP physical link.
 *
 * Returns 0 on success, -EPROTO/-EINVAL for bad input, -EFAULT when the
 * AMP controller or its link cannot be used (a BAD_AMP response is sent).
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EFAULT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used on AMP links; MTU follows the AMP HCI. */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return -EFAULT;
}
4467 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4469 struct l2cap_move_chan_req req;
4470 u8 ident;
4472 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4474 ident = l2cap_get_ident(chan->conn);
4475 chan->ident = ident;
4477 req.icid = cpu_to_le16(chan->scid);
4478 req.dest_amp_id = dest_amp_id;
4480 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4481 &req);
4483 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4486 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4488 struct l2cap_move_chan_rsp rsp;
4490 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4492 rsp.icid = cpu_to_le16(chan->dcid);
4493 rsp.result = cpu_to_le16(result);
4495 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4496 sizeof(rsp), &rsp);
4499 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4501 struct l2cap_move_chan_cfm cfm;
4503 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4505 chan->ident = l2cap_get_ident(chan->conn);
4507 cfm.icid = cpu_to_le16(chan->scid);
4508 cfm.result = cpu_to_le16(result);
4510 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4511 sizeof(cfm), &cfm);
4513 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4516 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4518 struct l2cap_move_chan_cfm cfm;
4520 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4522 cfm.icid = cpu_to_le16(icid);
4523 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4525 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4526 sizeof(cfm), &cfm);
4529 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4530 u16 icid)
4532 struct l2cap_move_chan_cfm_rsp rsp;
4534 BT_DBG("icid 0x%4.4x", icid);
4536 rsp.icid = cpu_to_le16(icid);
4537 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4540 static void __release_logical_link(struct l2cap_chan *chan)
4542 chan->hs_hchan = NULL;
4543 chan->hs_hcon = NULL;
4545 /* Placeholder - release the logical link */
/* React to a failed AMP logical link setup.
 *
 * A channel that never reached BT_CONNECTED is torn down; an
 * established channel that was mid-move aborts the move, answering the
 * peer according to our move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
/* Complete an AMP channel creation once the logical link is up: bind
 * the hci_chan, send the deferred EFS Configure Response, and finish
 * channel setup if the input direction is already configured.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the Configure Request arrived. */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
/* Advance the channel-move state machine once the AMP logical link for
 * a move has come up, acting according to our move role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local busy condition clears. */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
/* Call with chan locked */
/* Entry point for AMP logical link completion: dispatch to the failure,
 * create-completion, or move-completion path based on status and
 * channel state.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4657 void l2cap_move_start(struct l2cap_chan *chan)
4659 BT_DBG("chan %p", chan);
4661 if (chan->local_amp_id == HCI_BREDR_ID) {
4662 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4663 return;
4664 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4665 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4666 /* Placeholder - start physical link setup */
4667 } else {
4668 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4669 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4670 chan->move_id = 0;
4671 l2cap_move_setup(chan);
4672 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after AMP physical link setup concluded
 * with the given result; handles both the outgoing (BT_CONNECT) and
 * incoming directions.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links. */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Proceed straight into configuration. */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
/* As move initiator: prepare the channel, record the target controller,
 * and send the Move Channel Request to the peer.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
/* As move responder: answer the pending Move Channel Request based on
 * the availability and state of the logical link.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4763 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4765 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4766 u8 rsp_result;
4767 if (result == -EINVAL)
4768 rsp_result = L2CAP_MR_BAD_ID;
4769 else
4770 rsp_result = L2CAP_MR_NOT_ALLOWED;
4772 l2cap_send_move_chan_rsp(chan, rsp_result);
4775 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4776 chan->move_state = L2CAP_MOVE_STABLE;
4778 /* Restart data transmission */
4779 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Continue channel create/move processing after AMP physical link setup
 * finished with the given result.
 *
 * NOTE(review): the early-return path below unlocks the channel itself,
 * while the remaining paths leave it locked — confirm the caller's
 * unlock expectations match this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
/* Handle an incoming Move Channel Request: validate the request, detect
 * move collisions, take the responder role, and answer with the
 * appropriate result.
 *
 * Returns 0, -EPROTO for a malformed command, or -EINVAL when high
 * speed support is disabled.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels not pinned to BR/EDR may
	 * be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Continue the move state machine after a SUCCESS or PEND Move Channel
 * Response from the peer.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	/* A PEND response extends the guard timeout. */
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
/* Handle a failing Move Channel Response: on collision the initiator
 * yields and becomes responder; otherwise the move is cancelled. An
 * UNCONFIRMED confirmation is always sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Yield to the peer's concurrent move request. */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5033 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5034 struct l2cap_cmd_hdr *cmd,
5035 u16 cmd_len, void *data)
5037 struct l2cap_move_chan_rsp *rsp = data;
5038 u16 icid, result;
5040 if (cmd_len != sizeof(*rsp))
5041 return -EPROTO;
5043 icid = le16_to_cpu(rsp->icid);
5044 result = le16_to_cpu(rsp->result);
5046 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5048 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5049 l2cap_move_continue(conn, icid, result);
5050 else
5051 l2cap_move_fail(conn, cmd->ident, icid, result);
5053 return 0;
/* Handle an incoming Move Channel Confirmation: commit or roll back the
 * controller switch and always acknowledge with a confirm response.
 *
 * Returns 0, or -EPROTO for a malformed command.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit: switch to the new controller. */
			chan->local_amp_id = chan->move_id;
			/* id 0 = back on BR/EDR, AMP link no longer needed. */
			if (!chan->local_amp_id)
				__release_logical_link(chan);
		} else {
			/* Roll back to the current controller. */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle a Move Channel Confirm Response, the final message of the AMP
 * channel-move confirm exchange.  The channel (looked up by source CID,
 * returned locked) commits to the new controller id; if it moved back
 * to BR/EDR and still holds an AMP logical link, that link is released.
 * An unknown icid is silently ignored (no response is defined for this
 * message).  Returns -EPROTO on wrong-length command, 0 otherwise.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (!chan->local_amp_id && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5133 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5134 u16 to_multiplier)
5136 u16 max_latency;
5138 if (min > max || min < 6 || max > 3200)
5139 return -EINVAL;
5141 if (to_multiplier < 10 || to_multiplier > 3200)
5142 return -EINVAL;
5144 if (max >= to_multiplier * 8)
5145 return -EINVAL;
5147 max_latency = (to_multiplier * 8 / max) - 1;
5148 if (latency > 499 || latency > max_latency)
5149 return -EINVAL;
5151 return 0;
/* Handle an LE Connection Parameter Update Request from the remote.
 *
 * Only a master may apply parameter updates, so the request is rejected
 * outright when we are not master of the link.  Otherwise the proposed
 * parameters are validated with l2cap_check_conn_param(), a response
 * (accepted/rejected) is always sent, and on acceptance the update is
 * pushed down to the controller via hci_le_conn_update().
 *
 * Returns -EINVAL when not master, -EPROTO on a malformed command,
 * 0 otherwise.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply to the controller only after the response is queued */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
/* Dispatch a single BR/EDR signaling command to its handler, keyed on
 * the command opcode.  Echo requests are answered inline by reflecting
 * the payload; echo responses are ignored.  Unknown opcodes return
 * -EINVAL, which causes the caller to send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Connect and Create Channel responses share a handler */
		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Answer immediately, echoing the request payload back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5277 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5278 struct l2cap_cmd_hdr *cmd, u8 *data)
5280 switch (cmd->code) {
5281 case L2CAP_COMMAND_REJ:
5282 return 0;
5284 case L2CAP_CONN_PARAM_UPDATE_REQ:
5285 return l2cap_conn_param_update_req(conn, cmd, data);
5287 case L2CAP_CONN_PARAM_UPDATE_RSP:
5288 return 0;
5290 default:
5291 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5292 return -EINVAL;
/* Parse the LE signaling channel payload, which may carry several
 * back-to-back commands, and dispatch each one.  A command whose stated
 * length overruns the remaining data, or which has ident 0, aborts the
 * loop.  Failed commands are answered with a Command Reject.  The skb
 * is consumed (freed) before returning.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (sniffing) sockets a copy first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_le_sig_cmd(conn, &cmd, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
/* Parse the BR/EDR signaling channel payload and dispatch every command
 * it contains.  Same loop shape as l2cap_le_sig_channel(): a length
 * overrun or zero ident aborts parsing, failed commands get a Command
 * Reject, and the skb is consumed before returning.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (sniffing) sockets a copy first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
/* Verify the trailing CRC16 FCS of an ERTM/streaming frame.
 *
 * The FCS covers the L2CAP header (which the caller has already pulled;
 * hence skb->data - hdr_size) plus the payload.  skb_trim() removes the
 * FCS bytes from the logical length, but the trimmed bytes remain in
 * the buffer, so reading at skb->data + skb->len afterwards still
 * fetches the received FCS.  Returns 0 on match or when no FCS is in
 * use, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
/* Answer a poll (P=1) by sending a frame carrying the F-bit.
 *
 * If we are locally busy an RNR is sent immediately.  Pending I-frames
 * are then flushed via l2cap_ertm_send(); if one of them carried the
 * F-bit it clears CONN_SEND_FBIT, otherwise a plain RR with F=1 is sent
 * at the end so the poll is always answered.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just indicated it is no longer busy; restart the
	 * retransmission timer if we still have unacked frames.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
/* Append new_frag to the fragment list of skb (the SDU head) and
 * advance *last_frag to it.  When the head has no fragment list yet,
 * *last_frag is the head itself, so both the frag_list pointer and the
 * head's next pointer end up referencing new_frag.  The head's length
 * accounting is updated to cover the appended data.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
/* Reassemble SDU fragments per the frame's SAR field and deliver a
 * complete SDU to chan->ops->recv().
 *
 * Ownership rule: whenever the skb is consumed (delivered, stored as
 * the partial SDU, or appended to it), the local skb pointer is set to
 * NULL so the error path below does not double-free it.  Any path that
 * leaves err nonzero drops both the skb and the partial SDU.
 *
 * err starts at -EINVAL, so SAR sequence violations (START while an SDU
 * is in progress, CONTINUE/END without one, length mismatches) fall
 * through to the cleanup block.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented frame while reassembling is a protocol error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two bytes of a START frame carry the total SDU len */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* START frame already holding the whole SDU is invalid */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE must not complete (or overflow) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* END must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
/* Re-segment queued data for the new MTU after an AMP channel move.
 * Not implemented yet; always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5548 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5550 u8 event;
5552 if (chan->mode != L2CAP_MODE_ERTM)
5553 return;
5555 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5556 l2cap_tx(chan, NULL, NULL, event);
/* Drain in-sequence frames from the SREJ queue into the reassembler.
 *
 * Frames are pulled from srej_q as long as the next expected sequence
 * number (buffer_seq) is present and we are not locally busy.  Once the
 * queue is fully drained, the SREJ recovery is over: the channel drops
 * back to the normal RECV state and acknowledges.  Returns the first
 * reassembly error encountered, or 0.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
/* Handle an incoming SREJ S-frame: retransmit the single I-frame the
 * peer reports as missing.
 *
 * A reqseq equal to next_tx_seq cannot refer to a sent frame, so it is
 * treated as a protocol violation and the link is torn down; likewise
 * when the referenced frame has already hit max_tx retransmissions.
 * With P=1 we must answer with the F-bit; with F=1 we must avoid a
 * duplicate retransmission if this SREJ was already acted upon
 * (CONN_SREJ_ACT with matching saved reqseq).
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this exact SREJ was
			 * already handled while waiting for the F-bit.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
/* Handle an incoming REJ S-frame: the peer requests retransmission of
 * all unacked I-frames starting at reqseq.
 *
 * As with SREJ, a reqseq equal to next_tx_seq or a frame over its
 * max_tx retry budget disconnects the channel.  With F=1 the
 * retransmission is skipped if a previous REJ was already acted on
 * (CONN_REJ_ACT); otherwise everything from reqseq is retransmitted.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Classify the tx sequence number of a received I-frame relative to the
 * receive window and the current SREJ recovery state.  The returned
 * L2CAP_TXSEQ_* code drives the RX state machines: expected frames are
 * delivered, duplicates dropped, unexpected frames trigger SREJ
 * recovery, and invalid frames are ignored or disconnect the channel
 * depending on the window size (see the "double poll" note below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5774 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5775 struct l2cap_ctrl *control,
5776 struct sk_buff *skb, u8 event)
5778 int err = 0;
5779 bool skb_in_use = 0;
5781 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5782 event);
5784 switch (event) {
5785 case L2CAP_EV_RECV_IFRAME:
5786 switch (l2cap_classify_txseq(chan, control->txseq)) {
5787 case L2CAP_TXSEQ_EXPECTED:
5788 l2cap_pass_to_tx(chan, control);
5790 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5791 BT_DBG("Busy, discarding expected seq %d",
5792 control->txseq);
5793 break;
5796 chan->expected_tx_seq = __next_seq(chan,
5797 control->txseq);
5799 chan->buffer_seq = chan->expected_tx_seq;
5800 skb_in_use = 1;
5802 err = l2cap_reassemble_sdu(chan, skb, control);
5803 if (err)
5804 break;
5806 if (control->final) {
5807 if (!test_and_clear_bit(CONN_REJ_ACT,
5808 &chan->conn_state)) {
5809 control->final = 0;
5810 l2cap_retransmit_all(chan, control);
5811 l2cap_ertm_send(chan);
5815 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5816 l2cap_send_ack(chan);
5817 break;
5818 case L2CAP_TXSEQ_UNEXPECTED:
5819 l2cap_pass_to_tx(chan, control);
5821 /* Can't issue SREJ frames in the local busy state.
5822 * Drop this frame, it will be seen as missing
5823 * when local busy is exited.
5825 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5826 BT_DBG("Busy, discarding unexpected seq %d",
5827 control->txseq);
5828 break;
5831 /* There was a gap in the sequence, so an SREJ
5832 * must be sent for each missing frame. The
5833 * current frame is stored for later use.
5835 skb_queue_tail(&chan->srej_q, skb);
5836 skb_in_use = 1;
5837 BT_DBG("Queued %p (queue len %d)", skb,
5838 skb_queue_len(&chan->srej_q));
5840 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5841 l2cap_seq_list_clear(&chan->srej_list);
5842 l2cap_send_srej(chan, control->txseq);
5844 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5845 break;
5846 case L2CAP_TXSEQ_DUPLICATE:
5847 l2cap_pass_to_tx(chan, control);
5848 break;
5849 case L2CAP_TXSEQ_INVALID_IGNORE:
5850 break;
5851 case L2CAP_TXSEQ_INVALID:
5852 default:
5853 l2cap_send_disconn_req(chan, ECONNRESET);
5854 break;
5856 break;
5857 case L2CAP_EV_RECV_RR:
5858 l2cap_pass_to_tx(chan, control);
5859 if (control->final) {
5860 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5862 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5863 !__chan_is_moving(chan)) {
5864 control->final = 0;
5865 l2cap_retransmit_all(chan, control);
5868 l2cap_ertm_send(chan);
5869 } else if (control->poll) {
5870 l2cap_send_i_or_rr_or_rnr(chan);
5871 } else {
5872 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5873 &chan->conn_state) &&
5874 chan->unacked_frames)
5875 __set_retrans_timer(chan);
5877 l2cap_ertm_send(chan);
5879 break;
5880 case L2CAP_EV_RECV_RNR:
5881 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5882 l2cap_pass_to_tx(chan, control);
5883 if (control && control->poll) {
5884 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5885 l2cap_send_rr_or_rnr(chan, 0);
5887 __clear_retrans_timer(chan);
5888 l2cap_seq_list_clear(&chan->retrans_list);
5889 break;
5890 case L2CAP_EV_RECV_REJ:
5891 l2cap_handle_rej(chan, control);
5892 break;
5893 case L2CAP_EV_RECV_SREJ:
5894 l2cap_handle_srej(chan, control);
5895 break;
5896 default:
5897 break;
5900 if (skb && !skb_in_use) {
5901 BT_DBG("Freeing %p", skb);
5902 kfree_skb(skb);
5905 return err;
5908 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5909 struct l2cap_ctrl *control,
5910 struct sk_buff *skb, u8 event)
5912 int err = 0;
5913 u16 txseq = control->txseq;
5914 bool skb_in_use = 0;
5916 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5917 event);
5919 switch (event) {
5920 case L2CAP_EV_RECV_IFRAME:
5921 switch (l2cap_classify_txseq(chan, txseq)) {
5922 case L2CAP_TXSEQ_EXPECTED:
5923 /* Keep frame for reassembly later */
5924 l2cap_pass_to_tx(chan, control);
5925 skb_queue_tail(&chan->srej_q, skb);
5926 skb_in_use = 1;
5927 BT_DBG("Queued %p (queue len %d)", skb,
5928 skb_queue_len(&chan->srej_q));
5930 chan->expected_tx_seq = __next_seq(chan, txseq);
5931 break;
5932 case L2CAP_TXSEQ_EXPECTED_SREJ:
5933 l2cap_seq_list_pop(&chan->srej_list);
5935 l2cap_pass_to_tx(chan, control);
5936 skb_queue_tail(&chan->srej_q, skb);
5937 skb_in_use = 1;
5938 BT_DBG("Queued %p (queue len %d)", skb,
5939 skb_queue_len(&chan->srej_q));
5941 err = l2cap_rx_queued_iframes(chan);
5942 if (err)
5943 break;
5945 break;
5946 case L2CAP_TXSEQ_UNEXPECTED:
5947 /* Got a frame that can't be reassembled yet.
5948 * Save it for later, and send SREJs to cover
5949 * the missing frames.
5951 skb_queue_tail(&chan->srej_q, skb);
5952 skb_in_use = 1;
5953 BT_DBG("Queued %p (queue len %d)", skb,
5954 skb_queue_len(&chan->srej_q));
5956 l2cap_pass_to_tx(chan, control);
5957 l2cap_send_srej(chan, control->txseq);
5958 break;
5959 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5960 /* This frame was requested with an SREJ, but
5961 * some expected retransmitted frames are
5962 * missing. Request retransmission of missing
5963 * SREJ'd frames.
5965 skb_queue_tail(&chan->srej_q, skb);
5966 skb_in_use = 1;
5967 BT_DBG("Queued %p (queue len %d)", skb,
5968 skb_queue_len(&chan->srej_q));
5970 l2cap_pass_to_tx(chan, control);
5971 l2cap_send_srej_list(chan, control->txseq);
5972 break;
5973 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5974 /* We've already queued this frame. Drop this copy. */
5975 l2cap_pass_to_tx(chan, control);
5976 break;
5977 case L2CAP_TXSEQ_DUPLICATE:
5978 /* Expecting a later sequence number, so this frame
5979 * was already received. Ignore it completely.
5981 break;
5982 case L2CAP_TXSEQ_INVALID_IGNORE:
5983 break;
5984 case L2CAP_TXSEQ_INVALID:
5985 default:
5986 l2cap_send_disconn_req(chan, ECONNRESET);
5987 break;
5989 break;
5990 case L2CAP_EV_RECV_RR:
5991 l2cap_pass_to_tx(chan, control);
5992 if (control->final) {
5993 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5995 if (!test_and_clear_bit(CONN_REJ_ACT,
5996 &chan->conn_state)) {
5997 control->final = 0;
5998 l2cap_retransmit_all(chan, control);
6001 l2cap_ertm_send(chan);
6002 } else if (control->poll) {
6003 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6004 &chan->conn_state) &&
6005 chan->unacked_frames) {
6006 __set_retrans_timer(chan);
6009 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6010 l2cap_send_srej_tail(chan);
6011 } else {
6012 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6013 &chan->conn_state) &&
6014 chan->unacked_frames)
6015 __set_retrans_timer(chan);
6017 l2cap_send_ack(chan);
6019 break;
6020 case L2CAP_EV_RECV_RNR:
6021 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6022 l2cap_pass_to_tx(chan, control);
6023 if (control->poll) {
6024 l2cap_send_srej_tail(chan);
6025 } else {
6026 struct l2cap_ctrl rr_control;
6027 memset(&rr_control, 0, sizeof(rr_control));
6028 rr_control.sframe = 1;
6029 rr_control.super = L2CAP_SUPER_RR;
6030 rr_control.reqseq = chan->buffer_seq;
6031 l2cap_send_sframe(chan, &rr_control);
6034 break;
6035 case L2CAP_EV_RECV_REJ:
6036 l2cap_handle_rej(chan, control);
6037 break;
6038 case L2CAP_EV_RECV_SREJ:
6039 l2cap_handle_srej(chan, control);
6040 break;
6043 if (skb && !skb_in_use) {
6044 BT_DBG("Freeing %p", skb);
6045 kfree_skb(skb);
6048 return err;
6051 static int l2cap_finish_move(struct l2cap_chan *chan)
6053 BT_DBG("chan %p", chan);
6055 chan->rx_state = L2CAP_RX_STATE_RECV;
6057 if (chan->hs_hcon)
6058 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6059 else
6060 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6062 return l2cap_resegment(chan);
/* RX state after an AMP move while waiting for the peer's poll (P=1).
 *
 * Anything without the P-bit is a protocol error.  On the poll, the TX
 * side is rewound to the peer's reqseq (acked frames processed, send
 * pointer reset, unacked count cleared), the move is finished, and the
 * poll is answered with the F-bit.  The triggering S-frame is then
 * replayed through the normal RECV handler; I-frames are not valid
 * carriers of the first poll, hence the -EPROTO.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
/* RX state after an AMP move while waiting for the peer's final (F=1)
 * response to our poll.
 *
 * Anything without the F-bit is a protocol error.  On the final, the TX
 * side is rewound to the peer's reqseq, the channel MTU is switched to
 * the controller that now carries it, data is re-segmented, and the
 * frame is replayed through the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6141 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6143 /* Make sure reqseq is for a packet that has been sent but not acked */
6144 u16 unacked;
6146 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6147 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6150 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6151 struct sk_buff *skb, u8 event)
6153 int err = 0;
6155 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6156 control, skb, event, chan->rx_state);
6158 if (__valid_reqseq(chan, control->reqseq)) {
6159 switch (chan->rx_state) {
6160 case L2CAP_RX_STATE_RECV:
6161 err = l2cap_rx_state_recv(chan, control, skb, event);
6162 break;
6163 case L2CAP_RX_STATE_SREJ_SENT:
6164 err = l2cap_rx_state_srej_sent(chan, control, skb,
6165 event);
6166 break;
6167 case L2CAP_RX_STATE_WAIT_P:
6168 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6169 break;
6170 case L2CAP_RX_STATE_WAIT_F:
6171 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6172 break;
6173 default:
6174 /* shut it down */
6175 break;
6177 } else {
6178 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6179 control->reqseq, chan->next_tx_seq,
6180 chan->expected_ack_seq);
6181 l2cap_send_disconn_req(chan, ECONNRESET);
6184 return err;
/* Receive path for streaming mode: no retransmission, no flow control.
 *
 * In-sequence frames are reassembled; anything out of sequence simply
 * discards the partial SDU and the frame itself (streaming mode never
 * recovers lost data).  The sequence counters always advance past the
 * received txseq.  Always returns 0 in this version.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop any partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
/* Validate and dispatch a single ERTM/streaming frame.
 *
 * Checks, in order: FCS (bad frames are silently dropped - recovery
 * will request a retransmit), payload length against MPS, and the
 * F/P bit rules.  I-frames go to l2cap_rx() (ERTM) or
 * l2cap_stream_rx() (streaming); S-frames are mapped from their super
 * field to the matching RX event.  Any handler error disconnects the
 * channel.  Always returns 0; the skb is consumed on every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the S-frame super field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Deliver an incoming data frame to the channel identified by cid.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; the A2MP path
 * creates and locks a channel on demand.  All exits after a channel is
 * held fall through either drop (free skb, then unlock) or done
 * (unlock only, for paths where the skb was consumed).  Frames for
 * unknown cids or non-connected channels are dropped.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6372 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6373 struct sk_buff *skb)
6375 struct l2cap_chan *chan;
6377 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6378 if (!chan)
6379 goto drop;
6381 BT_DBG("chan %p, len %d", chan, skb->len);
6383 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6384 goto drop;
6386 if (chan->imtu < skb->len)
6387 goto drop;
6389 if (!chan->ops->recv(chan, skb))
6390 return;
6392 drop:
6393 kfree_skb(skb);
6396 static void l2cap_att_channel(struct l2cap_conn *conn,
6397 struct sk_buff *skb)
6399 struct l2cap_chan *chan;
6401 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6402 conn->src, conn->dst);
6403 if (!chan)
6404 goto drop;
6406 BT_DBG("chan %p, len %d", chan, skb->len);
6408 if (chan->imtu < skb->len)
6409 goto drop;
6411 if (!chan->ops->recv(chan, skb))
6412 return;
6414 drop:
6415 kfree_skb(skb);
/* Entry point for one complete (reassembled) L2CAP frame.
 *
 * Strips the basic L2CAP header and dispatches on the channel ID to
 * the LE/BR-EDR signalling, connectionless, ATT, SMP or dynamic data
 * channel handlers.  Frames whose header length field disagrees with
 * the actual payload length are dropped.  The skb is always consumed
 * by this function or by the handler it delegates to.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* skb_pull() only advances skb->data; lh still points at the
	 * header bytes, so reading lh->cid/lh->len below is safe.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a 2-byte PSM before the
		 * payload; may be unaligned within the skb.
		 */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		/* A non-zero return means an SMP protocol violation:
		 * kill the whole connection.
		 */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6464 /* ---- L2CAP interface with lower layer (HCI) ---- */
6466 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6468 int exact = 0, lm1 = 0, lm2 = 0;
6469 struct l2cap_chan *c;
6471 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6473 /* Find listening sockets and check their link_mode */
6474 read_lock(&chan_list_lock);
6475 list_for_each_entry(c, &chan_list, global_l) {
6476 struct sock *sk = c->sk;
6478 if (c->state != BT_LISTEN)
6479 continue;
6481 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6482 lm1 |= HCI_LM_ACCEPT;
6483 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6484 lm1 |= HCI_LM_MASTER;
6485 exact++;
6486 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6487 lm2 |= HCI_LM_ACCEPT;
6488 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6489 lm2 |= HCI_LM_MASTER;
6492 read_unlock(&chan_list_lock);
6494 return exact ? lm1 : lm2;
6497 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6499 struct l2cap_conn *conn;
6501 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6503 if (!status) {
6504 conn = l2cap_conn_add(hcon);
6505 if (conn)
6506 l2cap_conn_ready(conn);
6507 } else {
6508 l2cap_conn_del(hcon, bt_to_errno(status));
6512 int l2cap_disconn_ind(struct hci_conn *hcon)
6514 struct l2cap_conn *conn = hcon->l2cap_data;
6516 BT_DBG("hcon %p", hcon);
6518 if (!conn)
6519 return HCI_ERROR_REMOTE_USER_TERM;
6520 return conn->disc_reason;
/* HCI callback: the ACL link went down.  Tear down the L2CAP
 * connection, translating the HCI reason code to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
6530 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6532 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6533 return;
6535 if (encrypt == 0x00) {
6536 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6537 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6538 } else if (chan->sec_level == BT_SECURITY_HIGH)
6539 l2cap_chan_close(chan, ECONNREFUSED);
6540 } else {
6541 if (chan->sec_level == BT_SECURITY_MEDIUM)
6542 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed on the link.
 *
 * Walks every channel on the connection and advances its state machine
 * according to @status (0 = success) and @encrypt:
 *  - LE links additionally trigger SMP key distribution and cancel the
 *    security timer.
 *  - The fixed ATT channel becomes ready once the link is encrypted.
 *  - Already connected/configuring channels are resumed and re-checked
 *    against their security level.
 *  - BT_CONNECT: the deferred connect request is started on success,
 *    otherwise the channel is put on the disconnect timer.
 *  - BT_CONNECT2 (incoming, waiting on security): a connect response
 *    (pending / success / security-block) is sent and, on success,
 *    configuration is kicked off.
 *
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not subject to link security. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still in flight. */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Resume data flow suspended during the
			 * security procedure.
			 */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first. */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and schedule
				 * the disconnect.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: start configuration unless a config
			 * request was already sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* HCI callback: one ACL data packet arrived on @hcon.
 *
 * Reassembles fragmented L2CAP frames.  A start fragment allocates
 * conn->rx_skb sized from the basic L2CAP header's length field;
 * ACL_CONT fragments are appended until conn->rx_len reaches zero and
 * the complete frame is handed to l2cap_recv_frame().  Malformed
 * sequences mark the connection's reliable channels unreliable
 * (ECOMM) and drop the data.  Always consumes @skb; returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Bytes still missing for the full frame. */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
6763 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6765 struct l2cap_chan *c;
6767 read_lock(&chan_list_lock);
6769 list_for_each_entry(c, &chan_list, global_l) {
6770 struct sock *sk = c->sk;
6772 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6773 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6774 c->state, __le16_to_cpu(c->psm),
6775 c->scid, c->dcid, c->imtu, c->omtu,
6776 c->sec_level, c->mode);
6779 read_unlock(&chan_list_lock);
6781 return 0;
/* debugfs open: bind the seq_file show routine to this file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the "l2cap" debugfs entry (plain seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
6798 int __init l2cap_init(void)
6800 int err;
6802 err = l2cap_init_sockets();
6803 if (err < 0)
6804 return err;
6806 if (bt_debugfs) {
6807 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6808 NULL, &l2cap_debugfs_fops);
6809 if (!l2cap_debugfs)
6810 BT_ERR("Failed to create L2CAP debug file");
6813 return 0;
/* Module exit: remove the debugfs entry and unregister the L2CAP
 * socket layer (reverse order of l2cap_init()).
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
/* Expose "disable_ertm" as a writable module parameter so enhanced
 * retransmission mode can be turned off at load time or via sysfs.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");