mm/zsmalloc: allocate exactly size of struct zs_pool
[linux/fpc-iii.git] / net / bluetooth / l2cap_core.c
bloba2b6dfa38a0cfd7f020c9d6f5a1584a92ae5c6e6
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
/* Upper bound on LE flow-control credits; the credit field is 16 bits. */
44 #define LE_FLOWCTL_MAX_CREDITS 65535
/* Module parameter: when set, Enhanced Retransmission Mode is not offered. */
46 bool disable_ertm;
/* Local feature mask advertised in L2CAP information responses. */
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
/* Global list of all channels (across connections), guarded by
 * chan_list_lock below.
 */
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
/* Defaults used when initializing LE flow-control channels. */
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
/* Forward declarations for signalling helpers defined later in the file. */
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 void *data);
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event);
/* Map an HCI-level address type to the socket-level bdaddr type.
 * LE links distinguish public/random addresses; BR/EDR links always
 * report BDADDR_BREDR regardless of @type.
 */
66 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68 if (hcon->type == LE_LINK) {
69 if (type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
71 else
72 return BDADDR_LE_RANDOM;
75 return BDADDR_BREDR;
78 /* ---- L2CAP channels ---- */
/* Find the channel on @conn whose destination CID matches @cid.
 * Caller must hold conn->chan_lock.  Returns NULL when not found.
 */
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 u16 cid)
83 struct l2cap_chan *c;
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
89 return NULL;
/* Find the channel on @conn whose source CID matches @cid.
 * Caller must hold conn->chan_lock.  Returns NULL when not found.
 */
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 u16 cid)
95 struct l2cap_chan *c;
97 list_for_each_entry(c, &conn->chan_l, list) {
98 if (c->scid == cid)
99 return c;
101 return NULL;
104 /* Find channel with given SCID.
105 * Returns locked channel. */
/* NOTE(review): the returned channel is locked but no reference is
 * taken before conn->chan_lock is dropped; later kernels hold a kref
 * here to guard against a concurrent final put — confirm whether this
 * tree needs the same hardening.
 */
106 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
107 u16 cid)
109 struct l2cap_chan *c;
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_scid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
117 return c;
120 /* Find channel with given DCID.
121 * Returns locked channel.
 */
123 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
124 u16 cid)
126 struct l2cap_chan *c;
128 mutex_lock(&conn->chan_lock);
129 c = __l2cap_get_chan_by_dcid(conn, cid);
130 if (c)
131 l2cap_chan_lock(c);
132 mutex_unlock(&conn->chan_lock);
134 return c;
/* Find the channel on @conn with a pending signalling exchange using
 * @ident.  Caller must hold conn->chan_lock.
 */
137 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
138 u8 ident)
140 struct l2cap_chan *c;
142 list_for_each_entry(c, &conn->chan_l, list) {
143 if (c->ident == ident)
144 return c;
146 return NULL;
/* Locked-channel variant of the ident lookup (see SCID/DCID variants
 * above for the locking contract).
 */
149 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
150 u8 ident)
152 struct l2cap_chan *c;
154 mutex_lock(&conn->chan_lock);
155 c = __l2cap_get_chan_by_ident(conn, ident);
156 if (c)
157 l2cap_chan_lock(c);
158 mutex_unlock(&conn->chan_lock);
160 return c;
/* Search the global channel list for a channel bound to @psm on the
 * local address @src.  Caller must hold chan_list_lock.
 */
163 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165 struct l2cap_chan *c;
167 list_for_each_entry(c, &chan_list, global_l) {
168 if (c->sport == psm && !bacmp(&c->src, src))
169 return c;
171 return NULL;
/* Bind @chan to @psm on local address @src.  A zero @psm requests
 * automatic allocation from the dynamic range 0x1001-0x10ff (odd
 * values only, per the L2CAP spec).  Returns 0 on success,
 * -EADDRINUSE if the requested PSM is taken, or -EINVAL when the
 * dynamic range is exhausted.
 */
174 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176 int err;
178 write_lock(&chan_list_lock);
180 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
181 err = -EADDRINUSE;
182 goto done;
185 if (psm) {
186 chan->psm = psm;
187 chan->sport = psm;
188 err = 0;
189 } else {
190 u16 p;
192 err = -EINVAL;
/* Valid dynamic PSMs are odd; step by 2 over the range. */
193 for (p = 0x1001; p < 0x1100; p += 2)
194 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
195 chan->psm = cpu_to_le16(p);
196 chan->sport = cpu_to_le16(p);
197 err = 0;
198 break;
202 done:
203 write_unlock(&chan_list_lock);
204 return err;
206 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Configure @chan as a fixed channel with source CID @scid.
 * Always succeeds.
 */
208 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210 write_lock(&chan_list_lock);
212 /* Override the defaults (which are for conn-oriented) */
213 chan->omtu = L2CAP_DEFAULT_MTU;
214 chan->chan_type = L2CAP_CHAN_FIXED;
216 chan->scid = scid;
218 write_unlock(&chan_list_lock);
220 return 0;
/* Allocate the first free dynamic source CID on @conn.  The dynamic
 * range end differs between LE and BR/EDR links.  Returns 0 when the
 * range is exhausted (0 is not a valid CID).
 */
223 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
225 u16 cid, dyn_end;
227 if (conn->hcon->type == LE_LINK)
228 dyn_end = L2CAP_CID_LE_DYN_END;
229 else
230 dyn_end = L2CAP_CID_DYN_END;
232 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
233 if (!__l2cap_get_chan_by_scid(conn, cid))
234 return cid;
237 return 0;
/* Move @chan to @state and notify the channel ops with no error. */
240 static void l2cap_state_change(struct l2cap_chan *chan, int state)
242 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
243 state_to_string(state));
245 chan->state = state;
246 chan->ops->state_change(chan, state, 0);
/* Move @chan to @state, passing @err to the state-change callback. */
249 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
250 int state, int err)
252 chan->state = state;
253 chan->ops->state_change(chan, chan->state, err);
/* Report @err to the channel without changing its state. */
256 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
258 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer unless the monitor timer is
 * already pending (monitor supersedes retransmission).
 */
261 static void __set_retrans_timer(struct l2cap_chan *chan)
263 if (!delayed_work_pending(&chan->monitor_timer) &&
264 chan->retrans_timeout) {
265 l2cap_set_timer(chan, &chan->retrans_timer,
266 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first.
 */
270 static void __set_monitor_timer(struct l2cap_chan *chan)
272 __clear_retrans_timer(chan);
273 if (chan->monitor_timeout) {
274 l2cap_set_timer(chan, &chan->monitor_timer,
275 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of @head for the skb whose ERTM tx sequence number is
 * @seq.  Returns NULL if no frame with that txseq is queued.
 */
279 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
280 u16 seq)
282 struct sk_buff *skb;
284 skb_queue_walk(head, skb) {
285 if (bt_cb(skb)->control.txseq == seq)
286 return skb;
289 return NULL;
292 /* ---- L2CAP sequence number lists ---- */
294 /* For ERTM, ordered lists of sequence numbers must be tracked for
295 * SREJ requests that are received and for frames that are to be
296 * retransmitted. These seq_list functions implement a singly-linked
297 * list in an array, where membership in the list can also be checked
298 * in constant time. Items can also be added to the tail of the list
299 * and removed from the head in constant time, without further memory
300 * allocs or frees.
303 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
305 size_t alloc_size, i;
307 /* Allocated size is a power of 2 to map sequence numbers
308 * (which may be up to 14 bits) in to a smaller array that is
309 * sized for the negotiated ERTM transmit windows.
311 alloc_size = roundup_pow_of_two(size);
313 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
314 if (!seq_list->list)
315 return -ENOMEM;
317 seq_list->mask = alloc_size - 1;
318 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
319 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
320 for (i = 0; i < alloc_size; i++)
321 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323 return 0;
/* Release the backing array allocated by l2cap_seq_list_init(). */
326 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
328 kfree(seq_list->list);
/* O(1) membership test: a slot equal to L2CAP_SEQ_LIST_CLEAR means
 * the sequence number is not on the list.
 */
331 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
332 u16 seq)
334 /* Constant-time check for list membership */
335 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the head of the list in O(1).
 * NOTE(review): callers must not pop an empty list — there is no
 * emptiness check here; confirm all call sites guard on head !=
 * L2CAP_SEQ_LIST_CLEAR first.
 */
338 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
340 u16 seq = seq_list->head;
341 u16 mask = seq_list->mask;
343 seq_list->head = seq_list->list[seq & mask];
344 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Popping the tail sentinel empties the list entirely. */
346 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
347 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
348 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
351 return seq;
/* Reset every slot and the head/tail markers; no-op when already
 * empty (head == CLEAR).
 */
354 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
356 u16 i;
358 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
359 return;
361 for (i = 0; i <= seq_list->mask; i++)
362 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
364 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
365 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1); duplicates are silently ignored
 * (the slot already being non-CLEAR means it is on the list).
 */
368 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
370 u16 mask = seq_list->mask;
372 /* All appends happen in constant time */
374 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
375 return;
377 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
378 seq_list->head = seq;
379 else
380 seq_list->list[seq_list->tail & mask] = seq;
382 seq_list->tail = seq;
383 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with
 * an error derived from its current state.  Drops the timer's channel
 * reference on exit.
 */
386 static void l2cap_chan_timeout(struct work_struct *work)
388 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
389 chan_timer.work);
390 struct l2cap_conn *conn = chan->conn;
391 int reason;
393 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
395 mutex_lock(&conn->chan_lock);
396 l2cap_chan_lock(chan);
398 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
399 reason = ECONNREFUSED;
400 else if (chan->state == BT_CONNECT &&
401 chan->sec_level != BT_SECURITY_SDP)
402 reason = ECONNREFUSED;
403 else
404 reason = ETIMEDOUT;
406 l2cap_chan_close(chan, reason);
408 l2cap_chan_unlock(chan);
/* ops->close runs without the channel lock but still under
 * conn->chan_lock.
 */
410 chan->ops->close(chan);
411 mutex_unlock(&conn->chan_lock);
/* Drop the reference taken when the timer was armed. */
413 l2cap_chan_put(chan);
/* Allocate a new channel in BT_OPEN state, add it to the global
 * channel list and take the initial kref.  Returns NULL on allocation
 * failure.
 */
416 struct l2cap_chan *l2cap_chan_create(void)
418 struct l2cap_chan *chan;
420 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
421 if (!chan)
422 return NULL;
424 mutex_init(&chan->lock);
426 /* Set default lock nesting level */
427 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
429 write_lock(&chan_list_lock);
430 list_add(&chan->global_l, &chan_list);
431 write_unlock(&chan_list_lock);
433 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
435 chan->state = BT_OPEN;
437 kref_init(&chan->kref);
439 /* This flag is cleared in l2cap_chan_ready() */
440 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
442 BT_DBG("chan %p", chan);
444 return chan;
446 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink from the global list and free. */
448 static void l2cap_chan_destroy(struct kref *kref)
450 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
452 BT_DBG("chan %p", chan);
454 write_lock(&chan_list_lock);
455 list_del(&chan->global_l);
456 write_unlock(&chan_list_lock);
458 kfree(chan);
/* Take an additional reference on @c. */
461 void l2cap_chan_hold(struct l2cap_chan *c)
463 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount))
465 kref_get(&c->kref);
/* Drop a reference on @c; the last put frees via l2cap_chan_destroy. */
468 void l2cap_chan_put(struct l2cap_chan *c)
470 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
472 kref_put(&c->kref, l2cap_chan_destroy);
474 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset @chan to the specification's default ERTM/security/flush
 * parameters.  Used before a fresh configuration round.
 */
476 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
478 chan->fcs = L2CAP_FCS_CRC16;
479 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
480 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
481 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
482 chan->remote_max_tx = chan->max_tx;
483 chan->remote_tx_win = chan->tx_win;
484 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
485 chan->sec_level = BT_SECURITY_LOW;
486 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
487 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
488 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
489 chan->conf_state = 0;
491 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
493 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialize LE credit-based flow control state: no SDU in reassembly,
 * zero TX credits until the peer grants some, and module-default RX
 * credits/MPS (MPS is additionally capped by the channel's input MTU).
 */
495 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
497 chan->sdu = NULL;
498 chan->sdu_last_frag = NULL;
499 chan->sdu_len = 0;
500 chan->tx_credits = 0;
501 chan->rx_credits = le_max_credits;
502 chan->mps = min_t(u16, chan->imtu, le_default_mps);
504 skb_queue_head_init(&chan->tx_q);
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, take channel and (usually) hci_conn references, and link the
 * channel onto the connection's list.  Caller must hold
 * conn->chan_lock (see l2cap_chan_add for the locked wrapper).
 */
507 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
509 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
510 __le16_to_cpu(chan->psm), chan->dcid);
/* Any new channel activity resets the pending disconnect reason. */
512 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
514 chan->conn = conn;
516 switch (chan->chan_type) {
517 case L2CAP_CHAN_CONN_ORIENTED:
518 /* Alloc CID for connection-oriented socket */
519 chan->scid = l2cap_alloc_cid(conn);
520 if (conn->hcon->type == ACL_LINK)
521 chan->omtu = L2CAP_DEFAULT_MTU;
522 break;
524 case L2CAP_CHAN_CONN_LESS:
525 /* Connectionless socket */
526 chan->scid = L2CAP_CID_CONN_LESS;
527 chan->dcid = L2CAP_CID_CONN_LESS;
528 chan->omtu = L2CAP_DEFAULT_MTU;
529 break;
531 case L2CAP_CHAN_FIXED:
532 /* Caller will set CID and CID specific MTU values */
533 break;
535 default:
536 /* Raw socket can send/recv signalling messages only */
537 chan->scid = L2CAP_CID_SIGNALING;
538 chan->dcid = L2CAP_CID_SIGNALING;
539 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended flow spec (best effort) for this channel. */
542 chan->local_id = L2CAP_BESTEFFORT_ID;
543 chan->local_stype = L2CAP_SERV_BESTEFFORT;
544 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
545 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
546 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
547 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
549 l2cap_chan_hold(chan);
551 /* Only keep a reference for fixed channels if they requested it */
552 if (chan->chan_type != L2CAP_CHAN_FIXED ||
553 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
554 hci_conn_hold(conn->hcon);
556 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
559 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
561 mutex_lock(&conn->chan_lock);
562 __l2cap_chan_add(conn, chan);
563 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down with error @err:
 * cancel timers, notify the owner via ops->teardown, unlink from the
 * connection list, drop the references taken in __l2cap_chan_add, and
 * purge any mode-specific queues.  Caller holds the channel lock (and
 * conn->chan_lock when conn is set) — TODO confirm against callers.
 */
566 void l2cap_chan_del(struct l2cap_chan *chan, int err)
568 struct l2cap_conn *conn = chan->conn;
570 __clear_chan_timer(chan);
572 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
573 state_to_string(chan->state));
575 chan->ops->teardown(chan, err);
577 if (conn) {
578 struct amp_mgr *mgr = conn->hcon->amp_mgr;
579 /* Delete from channel list */
580 list_del(&chan->list);
582 l2cap_chan_put(chan);
584 chan->conn = NULL;
586 /* Reference was only held for non-fixed channels or
587 * fixed channels that explicitly requested it using the
588 * FLAG_HOLD_HCI_CONN flag.
 */
590 if (chan->chan_type != L2CAP_CHAN_FIXED ||
591 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
592 hci_conn_drop(conn->hcon);
594 if (mgr && mgr->bredr_chan == chan)
595 mgr->bredr_chan = NULL;
/* Tear down any AMP logical link still attached to the channel. */
598 if (chan->hs_hchan) {
599 struct hci_chan *hs_hchan = chan->hs_hchan;
601 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
602 amp_disconnect_logical_link(hs_hchan);
/* Nothing below was allocated if configuration never completed. */
605 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
606 return;
608 switch(chan->mode) {
609 case L2CAP_MODE_BASIC:
610 break;
612 case L2CAP_MODE_LE_FLOWCTL:
613 skb_queue_purge(&chan->tx_q);
614 break;
616 case L2CAP_MODE_ERTM:
617 __clear_retrans_timer(chan);
618 __clear_monitor_timer(chan);
619 __clear_ack_timer(chan);
621 skb_queue_purge(&chan->srej_q);
623 l2cap_seq_list_free(&chan->srej_list);
624 l2cap_seq_list_free(&chan->retrans_list);
626 /* fall through */
628 case L2CAP_MODE_STREAMING:
629 skb_queue_purge(&chan->tx_q);
630 break;
633 return;
635 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Work handler: after an LE identity-address resolution, propagate the
 * hci_conn's (possibly updated) destination address and type to every
 * channel on the connection.
 */
637 static void l2cap_conn_update_id_addr(struct work_struct *work)
639 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
640 id_addr_update_work);
641 struct hci_conn *hcon = conn->hcon;
642 struct l2cap_chan *chan;
644 mutex_lock(&conn->chan_lock);
646 list_for_each_entry(chan, &conn->chan_l, list) {
647 l2cap_chan_lock(chan);
648 bacpy(&chan->dst, &hcon->dst);
649 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
650 l2cap_chan_unlock(chan);
653 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request on @chan.
 * Deferred-setup channels report "authorization pending" rather than
 * "bad PSM".  Moves the channel to BT_DISCONN and answers the saved
 * signalling ident.
 */
656 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
658 struct l2cap_conn *conn = chan->conn;
659 struct l2cap_le_conn_rsp rsp;
660 u16 result;
662 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
663 result = L2CAP_CR_AUTHORIZATION;
664 else
665 result = L2CAP_CR_BAD_PSM;
667 l2cap_state_change(chan, BT_DISCONN);
669 rsp.dcid = cpu_to_le16(chan->scid);
670 rsp.mtu = cpu_to_le16(chan->imtu);
671 rsp.mps = cpu_to_le16(chan->mps);
672 rsp.credits = cpu_to_le16(chan->rx_credits);
673 rsp.result = cpu_to_le16(result);
675 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
676 &rsp);
/* BR/EDR counterpart of the above: reject a pending connection
 * request with either "security block" (deferred setup) or "bad PSM".
 */
679 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
681 struct l2cap_conn *conn = chan->conn;
682 struct l2cap_conn_rsp rsp;
683 u16 result;
685 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
686 result = L2CAP_CR_SEC_BLOCK;
687 else
688 result = L2CAP_CR_BAD_PSM;
690 l2cap_state_change(chan, BT_DISCONN);
692 rsp.scid = cpu_to_le16(chan->dcid);
693 rsp.dcid = cpu_to_le16(chan->scid);
694 rsp.result = cpu_to_le16(result);
695 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
697 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close @chan with error @reason, choosing the shutdown path by
 * current state: connected conn-oriented channels get a disconnect
 * request with a timeout, half-open incoming channels get a reject,
 * and everything else is torn down directly.  Caller holds the
 * channel lock — TODO confirm against callers.
 */
700 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
702 struct l2cap_conn *conn = chan->conn;
704 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
706 switch (chan->state) {
707 case BT_LISTEN:
708 chan->ops->teardown(chan, 0);
709 break;
711 case BT_CONNECTED:
712 case BT_CONFIG:
713 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
714 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
715 l2cap_send_disconn_req(chan, reason);
716 } else
717 l2cap_chan_del(chan, reason);
718 break;
720 case BT_CONNECT2:
721 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
722 if (conn->hcon->type == ACL_LINK)
723 l2cap_chan_connect_reject(chan);
724 else if (conn->hcon->type == LE_LINK)
725 l2cap_chan_le_connect_reject(chan);
728 l2cap_chan_del(chan, reason);
729 break;
731 case BT_CONNECT:
732 case BT_DISCONN:
733 l2cap_chan_del(chan, reason);
734 break;
736 default:
737 chan->ops->teardown(chan, 0);
738 break;
741 EXPORT_SYMBOL(l2cap_chan_close);
/* Translate the channel type + security level into the HCI
 * authentication requirement used for pairing.  SDP and 3DSP PSMs are
 * special-cased: low security is downgraded to "SDP" (no auth) and
 * high security maps to no-bonding MITM.  May raise chan->sec_level
 * as a side effect for those PSMs.
 */
743 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
745 switch (chan->chan_type) {
746 case L2CAP_CHAN_RAW:
747 switch (chan->sec_level) {
748 case BT_SECURITY_HIGH:
749 case BT_SECURITY_FIPS:
750 return HCI_AT_DEDICATED_BONDING_MITM;
751 case BT_SECURITY_MEDIUM:
752 return HCI_AT_DEDICATED_BONDING;
753 default:
754 return HCI_AT_NO_BONDING;
756 break;
757 case L2CAP_CHAN_CONN_LESS:
758 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
759 if (chan->sec_level == BT_SECURITY_LOW)
760 chan->sec_level = BT_SECURITY_SDP;
762 if (chan->sec_level == BT_SECURITY_HIGH ||
763 chan->sec_level == BT_SECURITY_FIPS)
764 return HCI_AT_NO_BONDING_MITM;
765 else
766 return HCI_AT_NO_BONDING;
767 break;
768 case L2CAP_CHAN_CONN_ORIENTED:
769 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
770 if (chan->sec_level == BT_SECURITY_LOW)
771 chan->sec_level = BT_SECURITY_SDP;
773 if (chan->sec_level == BT_SECURITY_HIGH ||
774 chan->sec_level == BT_SECURITY_FIPS)
775 return HCI_AT_NO_BONDING_MITM;
776 else
777 return HCI_AT_NO_BONDING;
779 /* fall through */
780 default:
781 switch (chan->sec_level) {
782 case BT_SECURITY_HIGH:
783 case BT_SECURITY_FIPS:
784 return HCI_AT_GENERAL_BONDING_MITM;
785 case BT_SECURITY_MEDIUM:
786 return HCI_AT_GENERAL_BONDING;
787 default:
788 return HCI_AT_NO_BONDING;
790 break;
794 /* Service level security */
/* Enforce the channel's security level on its link: LE links go
 * through SMP, BR/EDR links through HCI authentication.
 */
795 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
797 struct l2cap_conn *conn = chan->conn;
798 __u8 auth_type;
800 if (conn->hcon->type == LE_LINK)
801 return smp_conn_security(conn->hcon, chan->sec_level);
803 auth_type = l2cap_get_auth_type(chan);
805 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
806 initiator);
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-reserved range 1-128.
 */
809 static u8 l2cap_get_ident(struct l2cap_conn *conn)
811 u8 id;
813 /* Get next available identificator.
814 * 1 - 128 are used by kernel.
815 * 815 - 199 are reserved.
816 * 200 - 254 are used by utilities like l2ping, etc.
 */
819 mutex_lock(&conn->ident_lock);
821 if (++conn->tx_ident > 128)
822 conn->tx_ident = 1;
824 id = conn->tx_ident;
826 mutex_unlock(&conn->ident_lock);
828 return id;
/* Build and transmit a signalling command on @conn's signalling
 * channel at maximum HCI priority.  Silently drops the command if the
 * skb cannot be built.
 */
831 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
832 void *data)
834 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
835 u8 flags;
837 BT_DBG("code 0x%2.2x", code);
839 if (!skb)
840 return;
842 /* Use NO_FLUSH if supported or we have an LE link (which does
843 * not support auto-flushing packets) */
844 if (lmp_no_flush_capable(conn->hcon->hdev) ||
845 conn->hcon->type == LE_LINK)
846 flags = ACL_START_NO_FLUSH;
847 else
848 flags = ACL_START;
850 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
851 skb->priority = HCI_PRIO_MAX;
853 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than stable or wait-prepare).
 */
856 static bool __chan_is_moving(struct l2cap_chan *chan)
858 return chan->move_state != L2CAP_MOVE_STABLE &&
859 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit @skb on @chan's data path.  Channels moved to an AMP
 * controller send on the high-speed hci_chan (or drop the frame if
 * the logical link is gone); otherwise the frame goes out on the
 * baseband ACL link with flush flags chosen per link capability.
 */
862 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
864 struct hci_conn *hcon = chan->conn->hcon;
865 u16 flags;
867 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
868 skb->priority);
870 if (chan->hs_hcon && !__chan_is_moving(chan)) {
871 if (chan->hs_hchan)
872 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
873 else
874 kfree_skb(skb);
876 return;
879 /* Use NO_FLUSH for LE links (where this is the only option) or
880 * if the BR/EDR link supports it and flushing has not been
881 * explicitly requested (through FLAG_FLUSHABLE).
 */
883 if (hcon->type == LE_LINK ||
884 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
885 lmp_no_flush_capable(hcon->hdev)))
886 flags = ACL_START_NO_FLUSH;
887 else
888 flags = ACL_START;
890 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
891 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into @control.  The
 * frame-type bit selects S-frame (poll/super valid) vs I-frame
 * (sar/txseq valid); the unused fields are zeroed.
 */
894 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
896 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
897 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
899 if (enh & L2CAP_CTRL_FRAME_TYPE) {
900 /* S-Frame */
901 control->sframe = 1;
902 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
903 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
905 control->sar = 0;
906 control->txseq = 0;
907 } else {
908 /* I-Frame */
909 control->sframe = 0;
910 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
911 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
913 control->poll = 0;
914 control->super = 0;
/* Decode a 32-bit extended ERTM control field into @control; same
 * S-frame/I-frame split as the enhanced variant above.
 */
918 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
920 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
921 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
923 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
924 /* S-Frame */
925 control->sframe = 1;
926 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
927 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
929 control->sar = 0;
930 control->txseq = 0;
931 } else {
932 /* I-Frame */
933 control->sframe = 0;
934 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
935 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
937 control->poll = 0;
938 control->super = 0;
/* Pull and decode the control field at the head of @skb, using the
 * extended (32-bit) layout when the channel negotiated FLAG_EXT_CTRL,
 * otherwise the enhanced (16-bit) layout.
 */
942 static inline void __unpack_control(struct l2cap_chan *chan,
943 struct sk_buff *skb)
945 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
946 __unpack_extended_control(get_unaligned_le32(skb->data),
947 &bt_cb(skb)->control);
948 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
949 } else {
950 __unpack_enhanced_control(get_unaligned_le16(skb->data),
951 &bt_cb(skb)->control);
952 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode @control into a 32-bit extended control field (inverse of
 * __unpack_extended_control).
 */
956 static u32 __pack_extended_control(struct l2cap_ctrl *control)
958 u32 packed;
960 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
961 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
963 if (control->sframe) {
964 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
965 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
966 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
967 } else {
968 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
969 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
972 return packed;
/* Encode @control into a 16-bit enhanced control field (inverse of
 * __unpack_enhanced_control).
 */
975 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
977 u16 packed;
979 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
980 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
982 if (control->sframe) {
983 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
984 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
985 packed |= L2CAP_CTRL_FRAME_TYPE;
986 } else {
987 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
988 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
991 return packed;
/* Write the (already reserved) control field of @skb just past the
 * basic L2CAP header, choosing layout by FLAG_EXT_CTRL.
 */
994 static inline void __pack_control(struct l2cap_chan *chan,
995 struct l2cap_ctrl *control,
996 struct sk_buff *skb)
998 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
999 put_unaligned_le32(__pack_extended_control(control),
1000 skb->data + L2CAP_HDR_SIZE);
1001 } else {
1002 put_unaligned_le16(__pack_enhanced_control(control),
1003 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header for this channel: extended or enhanced. */
1007 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1009 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1010 return L2CAP_EXT_HDR_SIZE;
1011 else
1012 return L2CAP_ENH_HDR_SIZE;
/* Build an ERTM S-frame PDU carrying the raw control value @control:
 * basic header + 16/32-bit control field + optional CRC16 FCS over
 * the whole frame.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
1015 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1016 u32 control)
1018 struct sk_buff *skb;
1019 struct l2cap_hdr *lh;
1020 int hlen = __ertm_hdr_size(chan);
1022 if (chan->fcs == L2CAP_FCS_CRC16)
1023 hlen += L2CAP_FCS_SIZE;
1025 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1027 if (!skb)
1028 return ERR_PTR(-ENOMEM);
/* len covers the payload after the basic header (control [+ FCS]). */
1030 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1031 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1032 lh->cid = cpu_to_le16(chan->dcid);
1034 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1035 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1036 else
1037 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1039 if (chan->fcs == L2CAP_FCS_CRC16) {
1040 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1041 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1044 skb->priority = HCI_PRIO_MAX;
1045 return skb;
/* Send the supervisory frame described by @control, updating ERTM
 * bookkeeping: piggy-back a pending F-bit, track RR/RNR "remote busy"
 * signalling, and record the acked sequence for non-SREJ frames.
 * No-op for I-frame controls or while an AMP move is in progress.
 */
1048 static void l2cap_send_sframe(struct l2cap_chan *chan,
1049 struct l2cap_ctrl *control)
1051 struct sk_buff *skb;
1052 u32 control_field;
1054 BT_DBG("chan %p, control %p", chan, control);
1056 if (!control->sframe)
1057 return;
1059 if (__chan_is_moving(chan))
1060 return;
1062 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1063 !control->poll)
1064 control->final = 1;
1066 if (control->super == L2CAP_SUPER_RR)
1067 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1068 else if (control->super == L2CAP_SUPER_RNR)
1069 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR acknowledge reqseq, so a separately scheduled ack is
 * no longer needed.
 */
1071 if (control->super != L2CAP_SUPER_SREJ) {
1072 chan->last_acked_seq = control->reqseq;
1073 __clear_ack_timer(chan);
1076 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1077 control->final, control->poll, control->super);
1079 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1080 control_field = __pack_extended_control(control);
1081 else
1082 control_field = __pack_enhanced_control(control);
1084 skb = l2cap_create_sframe_pdu(chan, control_field);
1085 if (!IS_ERR(skb))
1086 l2cap_do_send(chan, skb);
/* Send an RR (ready) or RNR (not ready, when locally busy) S-frame
 * acknowledging the current buffer_seq; @poll sets the P bit.
 */
1089 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1091 struct l2cap_ctrl control;
1093 BT_DBG("chan %p, poll %d", chan, poll);
1095 memset(&control, 0, sizeof(control));
1096 control.sframe = 1;
1097 control.poll = poll;
1099 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1100 control.super = L2CAP_SUPER_RNR;
1101 else
1102 control.super = L2CAP_SUPER_RR;
1104 control.reqseq = chan->buffer_seq;
1105 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for @chan (always true
 * for non-connection-oriented channels).
 */
1108 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1110 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1111 return true;
1113 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether @chan may be created on an AMP controller: both
 * sides must advertise A2MP, at least one non-BR/EDR AMP controller
 * must be up, and the channel policy must prefer AMP.
 */
1116 static bool __amp_capable(struct l2cap_chan *chan)
1118 struct l2cap_conn *conn = chan->conn;
1119 struct hci_dev *hdev;
1120 bool amp_available = false;
1122 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1123 return false;
1125 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1126 return false;
1128 read_lock(&hci_dev_list_lock);
1129 list_for_each_entry(hdev, &hci_dev_list, list) {
1130 if (hdev->amp_type != AMP_TYPE_BREDR &&
1131 test_bit(HCI_UP, &hdev->flags)) {
1132 amp_available = true;
1133 break;
1136 read_unlock(&hci_dev_list_lock);
1138 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1139 return amp_available;
1141 return false;
/* Placeholder: extended flow spec validation is not implemented and
 * currently accepts all parameters.
 */
1144 static bool l2cap_check_efs(struct l2cap_chan *chan)
1146 /* Check EFS parameters */
1147 return true;
/* Send an L2CAP connection request for @chan, saving the signalling
 * ident for matching the response and flagging the pending connect.
 */
1150 void l2cap_send_conn_req(struct l2cap_chan *chan)
1152 struct l2cap_conn *conn = chan->conn;
1153 struct l2cap_conn_req req;
1155 req.scid = cpu_to_le16(chan->scid);
1156 req.psm = chan->psm;
1158 chan->ident = l2cap_get_ident(conn);
1160 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1162 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an AMP create-channel request for @chan on controller @amp_id. */
1165 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1167 struct l2cap_create_chan_req req;
1168 req.scid = cpu_to_le16(chan->scid);
1169 req.psm = chan->psm;
1170 req.amp_id = amp_id;
1172 chan->ident = l2cap_get_ident(chan->conn);
1174 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1175 sizeof(req), &req);
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, mark
 * already-sent frames for single retransmission, reset SREJ state and
 * park the FSMs with the remote treated as busy until the move
 * completes.  No-op for non-ERTM channels.
 */
1178 static void l2cap_move_setup(struct l2cap_chan *chan)
1180 struct sk_buff *skb;
1182 BT_DBG("chan %p", chan);
1184 if (chan->mode != L2CAP_MODE_ERTM)
1185 return;
1187 __clear_retrans_timer(chan);
1188 __clear_monitor_timer(chan);
1189 __clear_ack_timer(chan);
1191 chan->retry_count = 0;
/* Frames with retries set were transmitted at least once; reset
 * them to a single retry.  The walk stops at the first unsent
 * frame since tx_q is ordered.
 */
1192 skb_queue_walk(&chan->tx_q, skb) {
1193 if (bt_cb(skb)->control.retries)
1194 bt_cb(skb)->control.retries = 1;
1195 else
1196 break;
1199 chan->expected_tx_seq = chan->buffer_seq;
1201 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1202 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1203 l2cap_seq_list_clear(&chan->retrans_list);
1204 l2cap_seq_list_clear(&chan->srej_list);
1205 skb_queue_purge(&chan->srej_q);
1207 chan->tx_state = L2CAP_TX_STATE_XMIT;
1208 chan->rx_state = L2CAP_RX_STATE_MOVE;
1210 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to stable state and, for ERTM channels,
 * run the poll/final exchange appropriate to the move role.
 */
1213 static void l2cap_move_done(struct l2cap_chan *chan)
1215 u8 move_role = chan->move_role;
1216 BT_DBG("chan %p", chan);
1218 chan->move_state = L2CAP_MOVE_STABLE;
1219 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1221 if (chan->mode != L2CAP_MODE_ERTM)
1222 return;
1224 switch (move_role) {
1225 case L2CAP_MOVE_ROLE_INITIATOR:
1226 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1227 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1228 break;
1229 case L2CAP_MOVE_ROLE_RESPONDER:
1230 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1231 break;
1235 static void l2cap_chan_ready(struct l2cap_chan *chan)
1237 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1238 chan->conf_state = 0;
1239 __clear_chan_timer(chan);
1241 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1242 chan->ops->suspend(chan);
1244 chan->state = BT_CONNECTED;
1246 chan->ops->ready(chan);
/* Send an LE Credit Based Connection Request for @chan. Idempotent:
 * FLAG_LE_CONN_REQ_SENT guards against sending the request twice.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
/* Start an LE channel once link security is sufficient. Fixed channels
 * (no PSM) become ready immediately; connection-oriented channels in
 * BT_CONNECT send the LE connect request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* Bail out until SMP has raised the link to the required level */
	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}
/* Kick off channel establishment on the appropriate transport:
 * AMP discovery when the channel is AMP capable, the LE credit based
 * procedure on LE links, otherwise a BR/EDR connection request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
/* Query the remote feature mask, at most once per connection. The
 * answer (or the info_timer expiry) gates channel establishment, see
 * l2cap_do_start().
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* Proceed anyway after a timeout if the remote never answers */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
/* Drive connection setup for @chan. On LE the channel can start right
 * away; on BR/EDR the remote feature mask must be fetched first, and
 * the connect request is only sent once security is satisfied and no
 * connect is already pending.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Wait for the feature-mask response (or its timeout) */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
1337 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1339 u32 local_feat_mask = l2cap_feat_mask;
1340 if (!disable_ertm)
1341 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1343 switch (mode) {
1344 case L2CAP_MODE_ERTM:
1345 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1346 case L2CAP_MODE_STREAMING:
1347 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1348 default:
1349 return 0x00;
/* Initiate disconnection of @chan with error @err: stop ERTM timers,
 * send an L2CAP Disconnect Request and move to BT_DISCONN.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* The A2MP fixed channel has no disconnect PDU; just change state */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
/* ---- L2CAP connections ---- */

/* Walk every channel on @conn and push it forward: fixed channels are
 * marked ready, BT_CONNECT channels (re)start connection setup, and
 * BT_CONNECT2 channels answer the pending incoming connect request.
 * Called once the remote feature mask is known (or timed out).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close if the channel insists on a mode the peer
			 * does not support and may not fall back.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept/reject first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Start configuration for newly accepted channels */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
/* Post-connect work specific to LE links: kick off pairing for
 * outgoing connections and request a connection parameter update if
 * the current interval is outside the configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
/* Called when the underlying link comes up: request the remote feature
 * mask on ACL links, then advance every channel that was waiting for
 * the link, and finally start processing queued inbound frames.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP is handled by its own state machine elsewhere */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the feature mask */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded a reliable link get the error */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
/* The remote never answered our information request: mark the feature
 * exchange as done anyway and let channel setup proceed with defaults.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */

int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
/* Remove @user from @conn and invoke its ->remove callback. Safe to
 * call for a user that was never registered (list pointers NULL).
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	/* NULL list pointers mean not (or no longer) registered */
	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* Reset the pointers so a later register/unregister sees "unused" */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from @conn, calling each user's
 * ->remove callback. Used during connection teardown; the caller holds
 * the locks described in l2cap_register_user().
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		/* Mark the entry unused before handing it back to its owner */
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
/* Tear down the l2cap_conn attached to @hcon: flush pending rx work,
 * unregister users, kill every channel with error @err, release the
 * HCI channel and drop the hci_conn's reference on the connection.
 * Relies on the parent hci_conn/hci_dev locking (see
 * l2cap_register_user()).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ops->close() can run after chan_del */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Break the hcon <-> conn linkage before dropping our reference */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
/* kref release callback: drop the hci_conn reference and free @conn */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}

/* Take a reference on @conn; paired with l2cap_conn_put() */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);

/* Release a reference on @conn; frees it when the count hits zero */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match (exact address pair wins over wildcard
 * matches). The returned channel is held; caller must put it.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Only consider channels whose source address type fits
		 * the link type the lookup is for.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
/* ERTM monitor timer expired: feed a monitor-timeout event into the tx
 * state machine, then drop the channel reference taken when the timer
 * was armed. Bails out if the channel has lost its connection.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* ERTM retransmission timer expired: feed a retransmit-timeout event
 * into the tx state machine, then drop the channel reference taken
 * when the timer was armed. Bails out if the channel lost its
 * connection.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Transmit all queued PDUs in streaming mode: assign each frame a tx
 * sequence number, append the FCS when enabled and hand it to the HCI
 * layer. Streaming mode never retransmits.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while the channel is being moved to another controller */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
/* Transmit as many pending ERTM I-frames as the remote tx window
 * allows. Each frame is numbered, FCS-protected when enabled, cloned
 * (the original stays queued for possible retransmission) and sent.
 * Returns the number of frames sent, or -ENOTCONN if disconnected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges received frames */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		 * read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
/* Retransmit every frame listed in chan->retrans_list: re-pack the
 * control field and FCS on a writable copy of the queued skb and send
 * it. Disconnects the channel if a frame exceeds chan->max_tx retries.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* Give up on the channel once the retry limit is hit
		 * (max_tx == 0 means retry forever).
		 */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggy-backed ack and F-bit for this resend */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
/* Queue the single frame identified by control->reqseq for
 * retransmission and send it immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
/* Retransmit every unacknowledged frame starting at control->reqseq
 * (typically in response to a REJ or a poll). Honors the peer's busy
 * state and remembers a pending F-bit when control->poll is set.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame the peer did not ack */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Collect everything already sent but not yet acked */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggy-back the ack on outgoing I-frames, send an explicit RR
 * once 3/4 of the ack window is outstanding, or re-arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked gets acked on timer expiry */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb head, the rest into newly allocated frag_list
 * skbs of at most conn->mtu bytes each. Returns bytes copied or a
 * negative error; on error the caller frees @skb (any fragments
 * allocated so far are already linked into it).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account fragment bytes in the parent skb */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the PSM and the user payload from @msg. Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Head skb carries at most one HCI fragment; rest goes to frags */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus the user
 * payload from @msg. Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder
 * control field (filled in at send time), optional SDU length for the
 * first segment (@sdulen != 0), payload, and room reserved for the
 * FCS. Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
/* Split an SDU from @msg into ERTM/streaming I-frame PDUs, tagging
 * each with the proper SAR value (unsegmented, start, continue, end),
 * and queue them on @seg_queue. Returns 0 or a negative error (the
 * queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the start segment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
/* Build one LE credit based flow-control PDU: L2CAP header, the SDU
 * length for the first segment (@sdulen != 0) and the payload from
 * @msg. Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
/* Split an SDU from @msg into LE flow-control PDUs of at most the
 * remote MPS and queue them on @seg_queue. Only the first PDU carries
 * the SDU length field. Returns 0 or a negative error (the queue is
 * purged on failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU loses L2CAP_SDULEN_SIZE bytes of payload to the
	 * SDU length field.
	 */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Subsequent PDUs have no SDU length field, so the
			 * full MPS is available for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
/* Send @len bytes of user data from @msg on @chan, dispatching on the
 * channel type and mode (connectionless, LE flow control, basic, ERTM
 * or streaming). Returns the number of bytes sent or a negative error.
 * The channel lock may be dropped while allocating skbs, so the state
 * is rechecked after each allocation.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have disconnected while segmenting (the lock
		 * is dropped for allocation); discard everything if so.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many PDUs as we have credits for */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send SREJ S-frames for every missing sequence number between the
 * expected tx seq and @txseq (the frame that actually arrived), then
 * advance expected_tx_seq past @txseq. Each requested seq is recorded
 * in the SREJ list; already-buffered out-of-order frames are skipped.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2560 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2562 struct l2cap_ctrl control;
2564 BT_DBG("chan %p", chan);
2566 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2567 return;
2569 memset(&control, 0, sizeof(control));
2570 control.sframe = 1;
2571 control.super = L2CAP_SUPER_SREJ;
2572 control.reqseq = chan->srej_list.tail;
2573 l2cap_send_sframe(chan, &control);
2576 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2578 struct l2cap_ctrl control;
2579 u16 initial_head;
2580 u16 seq;
2582 BT_DBG("chan %p, txseq %u", chan, txseq);
2584 memset(&control, 0, sizeof(control));
2585 control.sframe = 1;
2586 control.super = L2CAP_SUPER_SREJ;
2588 /* Capture initial list head to allow only one pass through the list. */
2589 initial_head = chan->srej_list.head;
2591 do {
2592 seq = l2cap_seq_list_pop(&chan->srej_list);
2593 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2594 break;
2596 control.reqseq = seq;
2597 l2cap_send_sframe(chan, &control);
2598 l2cap_seq_list_append(&chan->srej_list, seq);
2599 } while (chan->srej_list.head != initial_head);
2602 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2604 struct sk_buff *acked_skb;
2605 u16 ackseq;
2607 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2609 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2610 return;
2612 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2613 chan->expected_ack_seq, chan->unacked_frames);
2615 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2616 ackseq = __next_seq(chan, ackseq)) {
2618 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2619 if (acked_skb) {
2620 skb_unlink(acked_skb, &chan->tx_q);
2621 kfree_skb(acked_skb);
2622 chan->unacked_frames--;
2626 chan->expected_ack_seq = reqseq;
2628 if (chan->unacked_frames == 0)
2629 __clear_retrans_timer(chan);
2631 BT_DBG("unacked_frames %u", chan->unacked_frames);
2634 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2636 BT_DBG("chan %p", chan);
2638 chan->expected_tx_seq = chan->buffer_seq;
2639 l2cap_seq_list_clear(&chan->srej_list);
2640 skb_queue_purge(&chan->srej_q);
2641 chan->rx_state = L2CAP_RX_STATE_RECV;
2644 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2645 struct l2cap_ctrl *control,
2646 struct sk_buff_head *skbs, u8 event)
2648 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2649 event);
2651 switch (event) {
2652 case L2CAP_EV_DATA_REQUEST:
2653 if (chan->tx_send_head == NULL)
2654 chan->tx_send_head = skb_peek(skbs);
2656 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2657 l2cap_ertm_send(chan);
2658 break;
2659 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2660 BT_DBG("Enter LOCAL_BUSY");
2661 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2663 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2664 /* The SREJ_SENT state must be aborted if we are to
2665 * enter the LOCAL_BUSY state.
2667 l2cap_abort_rx_srej_sent(chan);
2670 l2cap_send_ack(chan);
2672 break;
2673 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2674 BT_DBG("Exit LOCAL_BUSY");
2675 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2677 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2678 struct l2cap_ctrl local_control;
2680 memset(&local_control, 0, sizeof(local_control));
2681 local_control.sframe = 1;
2682 local_control.super = L2CAP_SUPER_RR;
2683 local_control.poll = 1;
2684 local_control.reqseq = chan->buffer_seq;
2685 l2cap_send_sframe(chan, &local_control);
2687 chan->retry_count = 1;
2688 __set_monitor_timer(chan);
2689 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2691 break;
2692 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2693 l2cap_process_reqseq(chan, control->reqseq);
2694 break;
2695 case L2CAP_EV_EXPLICIT_POLL:
2696 l2cap_send_rr_or_rnr(chan, 1);
2697 chan->retry_count = 1;
2698 __set_monitor_timer(chan);
2699 __clear_ack_timer(chan);
2700 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2701 break;
2702 case L2CAP_EV_RETRANS_TO:
2703 l2cap_send_rr_or_rnr(chan, 1);
2704 chan->retry_count = 1;
2705 __set_monitor_timer(chan);
2706 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2707 break;
2708 case L2CAP_EV_RECV_FBIT:
2709 /* Nothing to process */
2710 break;
2711 default:
2712 break;
2716 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2717 struct l2cap_ctrl *control,
2718 struct sk_buff_head *skbs, u8 event)
2720 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2721 event);
2723 switch (event) {
2724 case L2CAP_EV_DATA_REQUEST:
2725 if (chan->tx_send_head == NULL)
2726 chan->tx_send_head = skb_peek(skbs);
2727 /* Queue data, but don't send. */
2728 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2729 break;
2730 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2731 BT_DBG("Enter LOCAL_BUSY");
2732 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2735 /* The SREJ_SENT state must be aborted if we are to
2736 * enter the LOCAL_BUSY state.
2738 l2cap_abort_rx_srej_sent(chan);
2741 l2cap_send_ack(chan);
2743 break;
2744 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2745 BT_DBG("Exit LOCAL_BUSY");
2746 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2748 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2749 struct l2cap_ctrl local_control;
2750 memset(&local_control, 0, sizeof(local_control));
2751 local_control.sframe = 1;
2752 local_control.super = L2CAP_SUPER_RR;
2753 local_control.poll = 1;
2754 local_control.reqseq = chan->buffer_seq;
2755 l2cap_send_sframe(chan, &local_control);
2757 chan->retry_count = 1;
2758 __set_monitor_timer(chan);
2759 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2761 break;
2762 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2763 l2cap_process_reqseq(chan, control->reqseq);
2765 /* Fall through */
2767 case L2CAP_EV_RECV_FBIT:
2768 if (control && control->final) {
2769 __clear_monitor_timer(chan);
2770 if (chan->unacked_frames > 0)
2771 __set_retrans_timer(chan);
2772 chan->retry_count = 0;
2773 chan->tx_state = L2CAP_TX_STATE_XMIT;
2774 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2776 break;
2777 case L2CAP_EV_EXPLICIT_POLL:
2778 /* Ignore */
2779 break;
2780 case L2CAP_EV_MONITOR_TO:
2781 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2782 l2cap_send_rr_or_rnr(chan, 1);
2783 __set_monitor_timer(chan);
2784 chan->retry_count++;
2785 } else {
2786 l2cap_send_disconn_req(chan, ECONNABORTED);
2788 break;
2789 default:
2790 break;
2794 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2795 struct sk_buff_head *skbs, u8 event)
2797 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2798 chan, control, skbs, event, chan->tx_state);
2800 switch (chan->tx_state) {
2801 case L2CAP_TX_STATE_XMIT:
2802 l2cap_tx_state_xmit(chan, control, skbs, event);
2803 break;
2804 case L2CAP_TX_STATE_WAIT_F:
2805 l2cap_tx_state_wait_f(chan, control, skbs, event);
2806 break;
2807 default:
2808 /* Ignore event */
2809 break;
2813 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2814 struct l2cap_ctrl *control)
2816 BT_DBG("chan %p, control %p", chan, control);
2817 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2820 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2821 struct l2cap_ctrl *control)
2823 BT_DBG("chan %p, control %p", chan, control);
2824 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2827 /* Copy frame to all raw sockets on that connection */
2828 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2830 struct sk_buff *nskb;
2831 struct l2cap_chan *chan;
2833 BT_DBG("conn %p", conn);
2835 mutex_lock(&conn->chan_lock);
2837 list_for_each_entry(chan, &conn->chan_l, list) {
2838 if (chan->chan_type != L2CAP_CHAN_RAW)
2839 continue;
2841 /* Don't send frame to the channel it came from */
2842 if (bt_cb(skb)->chan == chan)
2843 continue;
2845 nskb = skb_clone(skb, GFP_KERNEL);
2846 if (!nskb)
2847 continue;
2848 if (chan->ops->recv(chan, nskb))
2849 kfree_skb(nskb);
2852 mutex_unlock(&conn->chan_lock);
2855 /* ---- L2CAP signalling commands ---- */
2856 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2857 u8 ident, u16 dlen, void *data)
2859 struct sk_buff *skb, **frag;
2860 struct l2cap_cmd_hdr *cmd;
2861 struct l2cap_hdr *lh;
2862 int len, count;
2864 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2865 conn, code, ident, dlen);
2867 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2868 return NULL;
2870 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2871 count = min_t(unsigned int, conn->mtu, len);
2873 skb = bt_skb_alloc(count, GFP_KERNEL);
2874 if (!skb)
2875 return NULL;
2877 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2878 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2880 if (conn->hcon->type == LE_LINK)
2881 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2882 else
2883 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2885 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2886 cmd->code = code;
2887 cmd->ident = ident;
2888 cmd->len = cpu_to_le16(dlen);
2890 if (dlen) {
2891 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2892 memcpy(skb_put(skb, count), data, count);
2893 data += count;
2896 len -= skb->len;
2898 /* Continuation fragments (no L2CAP header) */
2899 frag = &skb_shinfo(skb)->frag_list;
2900 while (len) {
2901 count = min_t(unsigned int, conn->mtu, len);
2903 *frag = bt_skb_alloc(count, GFP_KERNEL);
2904 if (!*frag)
2905 goto fail;
2907 memcpy(skb_put(*frag, count), data, count);
2909 len -= count;
2910 data += count;
2912 frag = &(*frag)->next;
2915 return skb;
2917 fail:
2918 kfree_skb(skb);
2919 return NULL;
2922 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2923 unsigned long *val)
2925 struct l2cap_conf_opt *opt = *ptr;
2926 int len;
2928 len = L2CAP_CONF_OPT_SIZE + opt->len;
2929 *ptr += len;
2931 *type = opt->type;
2932 *olen = opt->len;
2934 switch (opt->len) {
2935 case 1:
2936 *val = *((u8 *) opt->val);
2937 break;
2939 case 2:
2940 *val = get_unaligned_le16(opt->val);
2941 break;
2943 case 4:
2944 *val = get_unaligned_le32(opt->val);
2945 break;
2947 default:
2948 *val = (unsigned long) opt->val;
2949 break;
2952 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2953 return len;
2956 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2958 struct l2cap_conf_opt *opt = *ptr;
2960 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2962 opt->type = type;
2963 opt->len = len;
2965 switch (len) {
2966 case 1:
2967 *((u8 *) opt->val) = val;
2968 break;
2970 case 2:
2971 put_unaligned_le16(val, opt->val);
2972 break;
2974 case 4:
2975 put_unaligned_le32(val, opt->val);
2976 break;
2978 default:
2979 memcpy(opt->val, (void *) val, len);
2980 break;
2983 *ptr += L2CAP_CONF_OPT_SIZE + len;
2986 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2988 struct l2cap_conf_efs efs;
2990 switch (chan->mode) {
2991 case L2CAP_MODE_ERTM:
2992 efs.id = chan->local_id;
2993 efs.stype = chan->local_stype;
2994 efs.msdu = cpu_to_le16(chan->local_msdu);
2995 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2996 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2997 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2998 break;
3000 case L2CAP_MODE_STREAMING:
3001 efs.id = 1;
3002 efs.stype = L2CAP_SERV_BESTEFFORT;
3003 efs.msdu = cpu_to_le16(chan->local_msdu);
3004 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3005 efs.acc_lat = 0;
3006 efs.flush_to = 0;
3007 break;
3009 default:
3010 return;
3013 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3014 (unsigned long) &efs);
3017 static void l2cap_ack_timeout(struct work_struct *work)
3019 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3020 ack_timer.work);
3021 u16 frames_to_ack;
3023 BT_DBG("chan %p", chan);
3025 l2cap_chan_lock(chan);
3027 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3028 chan->last_acked_seq);
3030 if (frames_to_ack)
3031 l2cap_send_rr_or_rnr(chan, 0);
3033 l2cap_chan_unlock(chan);
3034 l2cap_chan_put(chan);
3037 int l2cap_ertm_init(struct l2cap_chan *chan)
3039 int err;
3041 chan->next_tx_seq = 0;
3042 chan->expected_tx_seq = 0;
3043 chan->expected_ack_seq = 0;
3044 chan->unacked_frames = 0;
3045 chan->buffer_seq = 0;
3046 chan->frames_sent = 0;
3047 chan->last_acked_seq = 0;
3048 chan->sdu = NULL;
3049 chan->sdu_last_frag = NULL;
3050 chan->sdu_len = 0;
3052 skb_queue_head_init(&chan->tx_q);
3054 chan->local_amp_id = AMP_ID_BREDR;
3055 chan->move_id = AMP_ID_BREDR;
3056 chan->move_state = L2CAP_MOVE_STABLE;
3057 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3059 if (chan->mode != L2CAP_MODE_ERTM)
3060 return 0;
3062 chan->rx_state = L2CAP_RX_STATE_RECV;
3063 chan->tx_state = L2CAP_TX_STATE_XMIT;
3065 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3066 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3067 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3069 skb_queue_head_init(&chan->srej_q);
3071 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3072 if (err < 0)
3073 return err;
3075 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3076 if (err < 0)
3077 l2cap_seq_list_free(&chan->srej_list);
3079 return err;
3082 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3084 switch (mode) {
3085 case L2CAP_MODE_STREAMING:
3086 case L2CAP_MODE_ERTM:
3087 if (l2cap_mode_supported(mode, remote_feat_mask))
3088 return mode;
3089 /* fall through */
3090 default:
3091 return L2CAP_MODE_BASIC;
3095 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3097 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3098 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3101 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3103 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3104 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3107 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3108 struct l2cap_conf_rfc *rfc)
3110 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3111 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3113 /* Class 1 devices have must have ERTM timeouts
3114 * exceeding the Link Supervision Timeout. The
3115 * default Link Supervision Timeout for AMP
3116 * controllers is 10 seconds.
3118 * Class 1 devices use 0xffffffff for their
3119 * best-effort flush timeout, so the clamping logic
3120 * will result in a timeout that meets the above
3121 * requirement. ERTM timeouts are 16-bit values, so
3122 * the maximum timeout is 65.535 seconds.
3125 /* Convert timeout to milliseconds and round */
3126 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3128 /* This is the recommended formula for class 2 devices
3129 * that start ERTM timers when packets are sent to the
3130 * controller.
3132 ertm_to = 3 * ertm_to + 500;
3134 if (ertm_to > 0xffff)
3135 ertm_to = 0xffff;
3137 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3138 rfc->monitor_timeout = rfc->retrans_timeout;
3139 } else {
3140 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3141 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3145 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3147 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3148 __l2cap_ews_supported(chan->conn)) {
3149 /* use extended control field */
3150 set_bit(FLAG_EXT_CTRL, &chan->flags);
3151 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3152 } else {
3153 chan->tx_win = min_t(u16, chan->tx_win,
3154 L2CAP_DEFAULT_TX_WINDOW);
3155 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3157 chan->ack_win = chan->tx_win;
3160 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3162 struct l2cap_conf_req *req = data;
3163 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3164 void *ptr = req->data;
3165 u16 size;
3167 BT_DBG("chan %p", chan);
3169 if (chan->num_conf_req || chan->num_conf_rsp)
3170 goto done;
3172 switch (chan->mode) {
3173 case L2CAP_MODE_STREAMING:
3174 case L2CAP_MODE_ERTM:
3175 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3176 break;
3178 if (__l2cap_efs_supported(chan->conn))
3179 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3181 /* fall through */
3182 default:
3183 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3184 break;
3187 done:
3188 if (chan->imtu != L2CAP_DEFAULT_MTU)
3189 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3191 switch (chan->mode) {
3192 case L2CAP_MODE_BASIC:
3193 if (disable_ertm)
3194 break;
3196 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3197 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3198 break;
3200 rfc.mode = L2CAP_MODE_BASIC;
3201 rfc.txwin_size = 0;
3202 rfc.max_transmit = 0;
3203 rfc.retrans_timeout = 0;
3204 rfc.monitor_timeout = 0;
3205 rfc.max_pdu_size = 0;
3207 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3208 (unsigned long) &rfc);
3209 break;
3211 case L2CAP_MODE_ERTM:
3212 rfc.mode = L2CAP_MODE_ERTM;
3213 rfc.max_transmit = chan->max_tx;
3215 __l2cap_set_ertm_timeouts(chan, &rfc);
3217 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3218 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3219 L2CAP_FCS_SIZE);
3220 rfc.max_pdu_size = cpu_to_le16(size);
3222 l2cap_txwin_setup(chan);
3224 rfc.txwin_size = min_t(u16, chan->tx_win,
3225 L2CAP_DEFAULT_TX_WINDOW);
3227 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3228 (unsigned long) &rfc);
3230 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3231 l2cap_add_opt_efs(&ptr, chan);
3233 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3234 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3235 chan->tx_win);
3237 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3238 if (chan->fcs == L2CAP_FCS_NONE ||
3239 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3240 chan->fcs = L2CAP_FCS_NONE;
3241 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3242 chan->fcs);
3244 break;
3246 case L2CAP_MODE_STREAMING:
3247 l2cap_txwin_setup(chan);
3248 rfc.mode = L2CAP_MODE_STREAMING;
3249 rfc.txwin_size = 0;
3250 rfc.max_transmit = 0;
3251 rfc.retrans_timeout = 0;
3252 rfc.monitor_timeout = 0;
3254 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3255 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3256 L2CAP_FCS_SIZE);
3257 rfc.max_pdu_size = cpu_to_le16(size);
3259 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3260 (unsigned long) &rfc);
3262 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3263 l2cap_add_opt_efs(&ptr, chan);
3265 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3266 if (chan->fcs == L2CAP_FCS_NONE ||
3267 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3268 chan->fcs = L2CAP_FCS_NONE;
3269 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3270 chan->fcs);
3272 break;
3275 req->dcid = cpu_to_le16(chan->dcid);
3276 req->flags = cpu_to_le16(0);
3278 return ptr - data;
/* Parse the peer's Configuration Request (stored in chan->conf_req) and
 * build our Configuration Response into @data.  Returns the number of
 * bytes written, or -ECONNREFUSED when the requested configuration is
 * unacceptable.
 *
 * Each option's length (olen) is validated before its value is used;
 * malformed options are skipped.  This matches the upstream hardening
 * for the l2cap_get_conf_opt out-of-bounds reads (CVE-2019-3459/3460
 * class: olen is attacker-controlled and previously unchecked).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3495 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3496 void *data, u16 *result)
3498 struct l2cap_conf_req *req = data;
3499 void *ptr = req->data;
3500 int type, olen;
3501 unsigned long val;
3502 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3503 struct l2cap_conf_efs efs;
3505 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3507 while (len >= L2CAP_CONF_OPT_SIZE) {
3508 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3510 switch (type) {
3511 case L2CAP_CONF_MTU:
3512 if (val < L2CAP_DEFAULT_MIN_MTU) {
3513 *result = L2CAP_CONF_UNACCEPT;
3514 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3515 } else
3516 chan->imtu = val;
3517 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3518 break;
3520 case L2CAP_CONF_FLUSH_TO:
3521 chan->flush_to = val;
3522 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3523 2, chan->flush_to);
3524 break;
3526 case L2CAP_CONF_RFC:
3527 if (olen == sizeof(rfc))
3528 memcpy(&rfc, (void *)val, olen);
3530 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3531 rfc.mode != chan->mode)
3532 return -ECONNREFUSED;
3534 chan->fcs = 0;
3536 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3537 sizeof(rfc), (unsigned long) &rfc);
3538 break;
3540 case L2CAP_CONF_EWS:
3541 chan->ack_win = min_t(u16, val, chan->ack_win);
3542 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3543 chan->tx_win);
3544 break;
3546 case L2CAP_CONF_EFS:
3547 if (olen == sizeof(efs))
3548 memcpy(&efs, (void *)val, olen);
3550 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3551 efs.stype != L2CAP_SERV_NOTRAFIC &&
3552 efs.stype != chan->local_stype)
3553 return -ECONNREFUSED;
3555 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3556 (unsigned long) &efs);
3557 break;
3559 case L2CAP_CONF_FCS:
3560 if (*result == L2CAP_CONF_PENDING)
3561 if (val == L2CAP_FCS_NONE)
3562 set_bit(CONF_RECV_NO_FCS,
3563 &chan->conf_state);
3564 break;
3568 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3569 return -ECONNREFUSED;
3571 chan->mode = rfc.mode;
3573 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3574 switch (rfc.mode) {
3575 case L2CAP_MODE_ERTM:
3576 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3577 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3578 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3579 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3580 chan->ack_win = min_t(u16, chan->ack_win,
3581 rfc.txwin_size);
3583 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3584 chan->local_msdu = le16_to_cpu(efs.msdu);
3585 chan->local_sdu_itime =
3586 le32_to_cpu(efs.sdu_itime);
3587 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3588 chan->local_flush_to =
3589 le32_to_cpu(efs.flush_to);
3591 break;
3593 case L2CAP_MODE_STREAMING:
3594 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3598 req->dcid = cpu_to_le16(chan->dcid);
3599 req->flags = cpu_to_le16(0);
3601 return ptr - data;
3604 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3605 u16 result, u16 flags)
3607 struct l2cap_conf_rsp *rsp = data;
3608 void *ptr = rsp->data;
3610 BT_DBG("chan %p", chan);
3612 rsp->scid = cpu_to_le16(chan->dcid);
3613 rsp->result = cpu_to_le16(result);
3614 rsp->flags = cpu_to_le16(flags);
3616 return ptr - data;
3619 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3621 struct l2cap_le_conn_rsp rsp;
3622 struct l2cap_conn *conn = chan->conn;
3624 BT_DBG("chan %p", chan);
3626 rsp.dcid = cpu_to_le16(chan->scid);
3627 rsp.mtu = cpu_to_le16(chan->imtu);
3628 rsp.mps = cpu_to_le16(chan->mps);
3629 rsp.credits = cpu_to_le16(chan->rx_credits);
3630 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3632 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3633 &rsp);
3636 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3638 struct l2cap_conn_rsp rsp;
3639 struct l2cap_conn *conn = chan->conn;
3640 u8 buf[128];
3641 u8 rsp_code;
3643 rsp.scid = cpu_to_le16(chan->dcid);
3644 rsp.dcid = cpu_to_le16(chan->scid);
3645 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3646 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3648 if (chan->hs_hcon)
3649 rsp_code = L2CAP_CREATE_CHAN_RSP;
3650 else
3651 rsp_code = L2CAP_CONN_RSP;
3653 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3655 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3657 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3658 return;
3660 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3661 l2cap_build_conf_req(chan, buf), buf);
3662 chan->num_conf_req++;
3665 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3667 int type, olen;
3668 unsigned long val;
3669 /* Use sane default values in case a misbehaving remote device
3670 * did not send an RFC or extended window size option.
3672 u16 txwin_ext = chan->ack_win;
3673 struct l2cap_conf_rfc rfc = {
3674 .mode = chan->mode,
3675 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3676 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3677 .max_pdu_size = cpu_to_le16(chan->imtu),
3678 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3681 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3683 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3684 return;
3686 while (len >= L2CAP_CONF_OPT_SIZE) {
3687 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3689 switch (type) {
3690 case L2CAP_CONF_RFC:
3691 if (olen == sizeof(rfc))
3692 memcpy(&rfc, (void *)val, olen);
3693 break;
3694 case L2CAP_CONF_EWS:
3695 txwin_ext = val;
3696 break;
3700 switch (rfc.mode) {
3701 case L2CAP_MODE_ERTM:
3702 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3703 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3704 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3705 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3706 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3707 else
3708 chan->ack_win = min_t(u16, chan->ack_win,
3709 rfc.txwin_size);
3710 break;
3711 case L2CAP_MODE_STREAMING:
3712 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3716 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3717 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3718 u8 *data)
3720 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3722 if (cmd_len < sizeof(*rej))
3723 return -EPROTO;
3725 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3726 return 0;
3728 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3729 cmd->ident == conn->info_ident) {
3730 cancel_delayed_work(&conn->info_timer);
3732 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3733 conn->info_ident = 0;
3735 l2cap_conn_start(conn);
3738 return 0;
3741 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3742 struct l2cap_cmd_hdr *cmd,
3743 u8 *data, u8 rsp_code, u8 amp_id)
3745 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3746 struct l2cap_conn_rsp rsp;
3747 struct l2cap_chan *chan = NULL, *pchan;
3748 int result, status = L2CAP_CS_NO_INFO;
3750 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3751 __le16 psm = req->psm;
3753 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3755 /* Check if we have socket listening on psm */
3756 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3757 &conn->hcon->dst, ACL_LINK);
3758 if (!pchan) {
3759 result = L2CAP_CR_BAD_PSM;
3760 goto sendresp;
3763 mutex_lock(&conn->chan_lock);
3764 l2cap_chan_lock(pchan);
3766 /* Check if the ACL is secure enough (if not SDP) */
3767 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3768 !hci_conn_check_link_mode(conn->hcon)) {
3769 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3770 result = L2CAP_CR_SEC_BLOCK;
3771 goto response;
3774 result = L2CAP_CR_NO_MEM;
3776 /* Check if we already have channel with that dcid */
3777 if (__l2cap_get_chan_by_dcid(conn, scid))
3778 goto response;
3780 chan = pchan->ops->new_connection(pchan);
3781 if (!chan)
3782 goto response;
3784 /* For certain devices (ex: HID mouse), support for authentication,
3785 * pairing and bonding is optional. For such devices, inorder to avoid
3786 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3787 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3789 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3791 bacpy(&chan->src, &conn->hcon->src);
3792 bacpy(&chan->dst, &conn->hcon->dst);
3793 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3794 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3795 chan->psm = psm;
3796 chan->dcid = scid;
3797 chan->local_amp_id = amp_id;
3799 __l2cap_chan_add(conn, chan);
3801 dcid = chan->scid;
3803 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3805 chan->ident = cmd->ident;
3807 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3808 if (l2cap_chan_check_security(chan, false)) {
3809 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3810 l2cap_state_change(chan, BT_CONNECT2);
3811 result = L2CAP_CR_PEND;
3812 status = L2CAP_CS_AUTHOR_PEND;
3813 chan->ops->defer(chan);
3814 } else {
3815 /* Force pending result for AMP controllers.
3816 * The connection will succeed after the
3817 * physical link is up.
3819 if (amp_id == AMP_ID_BREDR) {
3820 l2cap_state_change(chan, BT_CONFIG);
3821 result = L2CAP_CR_SUCCESS;
3822 } else {
3823 l2cap_state_change(chan, BT_CONNECT2);
3824 result = L2CAP_CR_PEND;
3826 status = L2CAP_CS_NO_INFO;
3828 } else {
3829 l2cap_state_change(chan, BT_CONNECT2);
3830 result = L2CAP_CR_PEND;
3831 status = L2CAP_CS_AUTHEN_PEND;
3833 } else {
3834 l2cap_state_change(chan, BT_CONNECT2);
3835 result = L2CAP_CR_PEND;
3836 status = L2CAP_CS_NO_INFO;
3839 response:
3840 l2cap_chan_unlock(pchan);
3841 mutex_unlock(&conn->chan_lock);
3842 l2cap_chan_put(pchan);
3844 sendresp:
3845 rsp.scid = cpu_to_le16(scid);
3846 rsp.dcid = cpu_to_le16(dcid);
3847 rsp.result = cpu_to_le16(result);
3848 rsp.status = cpu_to_le16(status);
3849 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3851 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3852 struct l2cap_info_req info;
3853 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3855 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3856 conn->info_ident = l2cap_get_ident(conn);
3858 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3860 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3861 sizeof(info), &info);
3864 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3865 result == L2CAP_CR_SUCCESS) {
3866 u8 buf[128];
3867 set_bit(CONF_REQ_SENT, &chan->conf_state);
3868 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3869 l2cap_build_conf_req(chan, buf), buf);
3870 chan->num_conf_req++;
3873 return chan;
3876 static int l2cap_connect_req(struct l2cap_conn *conn,
3877 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3879 struct hci_dev *hdev = conn->hcon->hdev;
3880 struct hci_conn *hcon = conn->hcon;
3882 if (cmd_len < sizeof(struct l2cap_conn_req))
3883 return -EPROTO;
3885 hci_dev_lock(hdev);
3886 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3887 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3888 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3889 hci_dev_unlock(hdev);
3891 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3892 return 0;
3895 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3896 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3897 u8 *data)
3899 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3900 u16 scid, dcid, result, status;
3901 struct l2cap_chan *chan;
3902 u8 req[128];
3903 int err;
3905 if (cmd_len < sizeof(*rsp))
3906 return -EPROTO;
3908 scid = __le16_to_cpu(rsp->scid);
3909 dcid = __le16_to_cpu(rsp->dcid);
3910 result = __le16_to_cpu(rsp->result);
3911 status = __le16_to_cpu(rsp->status);
3913 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3914 dcid, scid, result, status);
3916 mutex_lock(&conn->chan_lock);
3918 if (scid) {
3919 chan = __l2cap_get_chan_by_scid(conn, scid);
3920 if (!chan) {
3921 err = -EBADSLT;
3922 goto unlock;
3924 } else {
3925 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3926 if (!chan) {
3927 err = -EBADSLT;
3928 goto unlock;
3932 err = 0;
3934 l2cap_chan_lock(chan);
3936 switch (result) {
3937 case L2CAP_CR_SUCCESS:
3938 l2cap_state_change(chan, BT_CONFIG);
3939 chan->ident = 0;
3940 chan->dcid = dcid;
3941 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3943 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3944 break;
3946 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3947 l2cap_build_conf_req(chan, req), req);
3948 chan->num_conf_req++;
3949 break;
3951 case L2CAP_CR_PEND:
3952 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3953 break;
3955 default:
3956 l2cap_chan_del(chan, ECONNREFUSED);
3957 break;
3960 l2cap_chan_unlock(chan);
3962 unlock:
3963 mutex_unlock(&conn->chan_lock);
3965 return err;
3968 static inline void set_default_fcs(struct l2cap_chan *chan)
3970 /* FCS is enabled only in ERTM or streaming mode, if one or both
3971 * sides request it.
3973 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3974 chan->fcs = L2CAP_FCS_NONE;
3975 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3976 chan->fcs = L2CAP_FCS_CRC16;
3979 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3980 u8 ident, u16 flags)
3982 struct l2cap_conn *conn = chan->conn;
3984 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3985 flags);
3987 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3988 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3990 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3991 l2cap_build_conf_rsp(chan, data,
3992 L2CAP_CONF_SUCCESS, flags), data);
3995 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3996 u16 scid, u16 dcid)
3998 struct l2cap_cmd_rej_cid rej;
4000 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4001 rej.scid = __cpu_to_le16(scid);
4002 rej.dcid = __cpu_to_le16(dcid);
4004 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4007 static inline int l2cap_config_req(struct l2cap_conn *conn,
4008 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4009 u8 *data)
4011 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4012 u16 dcid, flags;
4013 u8 rsp[64];
4014 struct l2cap_chan *chan;
4015 int len, err = 0;
4017 if (cmd_len < sizeof(*req))
4018 return -EPROTO;
4020 dcid = __le16_to_cpu(req->dcid);
4021 flags = __le16_to_cpu(req->flags);
4023 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4025 chan = l2cap_get_chan_by_scid(conn, dcid);
4026 if (!chan) {
4027 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4028 return 0;
4031 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4032 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4033 chan->dcid);
4034 goto unlock;
4037 /* Reject if config buffer is too small. */
4038 len = cmd_len - sizeof(*req);
4039 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4040 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4041 l2cap_build_conf_rsp(chan, rsp,
4042 L2CAP_CONF_REJECT, flags), rsp);
4043 goto unlock;
4046 /* Store config. */
4047 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4048 chan->conf_len += len;
4050 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4051 /* Incomplete config. Send empty response. */
4052 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4053 l2cap_build_conf_rsp(chan, rsp,
4054 L2CAP_CONF_SUCCESS, flags), rsp);
4055 goto unlock;
4058 /* Complete config. */
4059 len = l2cap_parse_conf_req(chan, rsp);
4060 if (len < 0) {
4061 l2cap_send_disconn_req(chan, ECONNRESET);
4062 goto unlock;
4065 chan->ident = cmd->ident;
4066 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4067 chan->num_conf_rsp++;
4069 /* Reset config buffer. */
4070 chan->conf_len = 0;
4072 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4073 goto unlock;
4075 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4076 set_default_fcs(chan);
4078 if (chan->mode == L2CAP_MODE_ERTM ||
4079 chan->mode == L2CAP_MODE_STREAMING)
4080 err = l2cap_ertm_init(chan);
4082 if (err < 0)
4083 l2cap_send_disconn_req(chan, -err);
4084 else
4085 l2cap_chan_ready(chan);
4087 goto unlock;
4090 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4091 u8 buf[64];
4092 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4093 l2cap_build_conf_req(chan, buf), buf);
4094 chan->num_conf_req++;
4097 /* Got Conf Rsp PENDING from remote side and assume we sent
4098 Conf Rsp PENDING in the code above */
4099 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4100 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4102 /* check compatibility */
4104 /* Send rsp for BR/EDR channel */
4105 if (!chan->hs_hcon)
4106 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4107 else
4108 chan->ident = cmd->ident;
4111 unlock:
4112 l2cap_chan_unlock(chan);
4113 return err;
4116 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4117 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4118 u8 *data)
4120 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4121 u16 scid, flags, result;
4122 struct l2cap_chan *chan;
4123 int len = cmd_len - sizeof(*rsp);
4124 int err = 0;
4126 if (cmd_len < sizeof(*rsp))
4127 return -EPROTO;
4129 scid = __le16_to_cpu(rsp->scid);
4130 flags = __le16_to_cpu(rsp->flags);
4131 result = __le16_to_cpu(rsp->result);
4133 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4134 result, len);
4136 chan = l2cap_get_chan_by_scid(conn, scid);
4137 if (!chan)
4138 return 0;
4140 switch (result) {
4141 case L2CAP_CONF_SUCCESS:
4142 l2cap_conf_rfc_get(chan, rsp->data, len);
4143 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4144 break;
4146 case L2CAP_CONF_PENDING:
4147 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4149 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4150 char buf[64];
4152 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4153 buf, &result);
4154 if (len < 0) {
4155 l2cap_send_disconn_req(chan, ECONNRESET);
4156 goto done;
4159 if (!chan->hs_hcon) {
4160 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4162 } else {
4163 if (l2cap_check_efs(chan)) {
4164 amp_create_logical_link(chan);
4165 chan->ident = cmd->ident;
4169 goto done;
4171 case L2CAP_CONF_UNACCEPT:
4172 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4173 char req[64];
4175 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4176 l2cap_send_disconn_req(chan, ECONNRESET);
4177 goto done;
4180 /* throw out any old stored conf requests */
4181 result = L2CAP_CONF_SUCCESS;
4182 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4183 req, &result);
4184 if (len < 0) {
4185 l2cap_send_disconn_req(chan, ECONNRESET);
4186 goto done;
4189 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4190 L2CAP_CONF_REQ, len, req);
4191 chan->num_conf_req++;
4192 if (result != L2CAP_CONF_SUCCESS)
4193 goto done;
4194 break;
4197 default:
4198 l2cap_chan_set_err(chan, ECONNRESET);
4200 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4201 l2cap_send_disconn_req(chan, ECONNRESET);
4202 goto done;
4205 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4206 goto done;
4208 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4210 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4211 set_default_fcs(chan);
4213 if (chan->mode == L2CAP_MODE_ERTM ||
4214 chan->mode == L2CAP_MODE_STREAMING)
4215 err = l2cap_ertm_init(chan);
4217 if (err < 0)
4218 l2cap_send_disconn_req(chan, -err);
4219 else
4220 l2cap_chan_ready(chan);
4223 done:
4224 l2cap_chan_unlock(chan);
4225 return err;
4228 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4229 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4230 u8 *data)
4232 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4233 struct l2cap_disconn_rsp rsp;
4234 u16 dcid, scid;
4235 struct l2cap_chan *chan;
4237 if (cmd_len != sizeof(*req))
4238 return -EPROTO;
4240 scid = __le16_to_cpu(req->scid);
4241 dcid = __le16_to_cpu(req->dcid);
4243 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4245 mutex_lock(&conn->chan_lock);
4247 chan = __l2cap_get_chan_by_scid(conn, dcid);
4248 if (!chan) {
4249 mutex_unlock(&conn->chan_lock);
4250 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4251 return 0;
4254 l2cap_chan_lock(chan);
4256 rsp.dcid = cpu_to_le16(chan->scid);
4257 rsp.scid = cpu_to_le16(chan->dcid);
4258 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4260 chan->ops->set_shutdown(chan);
4262 l2cap_chan_hold(chan);
4263 l2cap_chan_del(chan, ECONNRESET);
4265 l2cap_chan_unlock(chan);
4267 chan->ops->close(chan);
4268 l2cap_chan_put(chan);
4270 mutex_unlock(&conn->chan_lock);
4272 return 0;
4275 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4276 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4277 u8 *data)
4279 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4280 u16 dcid, scid;
4281 struct l2cap_chan *chan;
4283 if (cmd_len != sizeof(*rsp))
4284 return -EPROTO;
4286 scid = __le16_to_cpu(rsp->scid);
4287 dcid = __le16_to_cpu(rsp->dcid);
4289 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4291 mutex_lock(&conn->chan_lock);
4293 chan = __l2cap_get_chan_by_scid(conn, scid);
4294 if (!chan) {
4295 mutex_unlock(&conn->chan_lock);
4296 return 0;
4299 l2cap_chan_lock(chan);
4301 l2cap_chan_hold(chan);
4302 l2cap_chan_del(chan, 0);
4304 l2cap_chan_unlock(chan);
4306 chan->ops->close(chan);
4307 l2cap_chan_put(chan);
4309 mutex_unlock(&conn->chan_lock);
4311 return 0;
4314 static inline int l2cap_information_req(struct l2cap_conn *conn,
4315 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4316 u8 *data)
4318 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4319 u16 type;
4321 if (cmd_len != sizeof(*req))
4322 return -EPROTO;
4324 type = __le16_to_cpu(req->type);
4326 BT_DBG("type 0x%4.4x", type);
4328 if (type == L2CAP_IT_FEAT_MASK) {
4329 u8 buf[8];
4330 u32 feat_mask = l2cap_feat_mask;
4331 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4332 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4333 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4334 if (!disable_ertm)
4335 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4336 | L2CAP_FEAT_FCS;
4337 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4338 feat_mask |= L2CAP_FEAT_EXT_FLOW
4339 | L2CAP_FEAT_EXT_WINDOW;
4341 put_unaligned_le32(feat_mask, rsp->data);
4342 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4343 buf);
4344 } else if (type == L2CAP_IT_FIXED_CHAN) {
4345 u8 buf[12];
4346 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4348 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4349 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4350 rsp->data[0] = conn->local_fixed_chan;
4351 memset(rsp->data + 1, 0, 7);
4352 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4353 buf);
4354 } else {
4355 struct l2cap_info_rsp rsp;
4356 rsp.type = cpu_to_le16(type);
4357 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4358 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4359 &rsp);
4362 return 0;
4365 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4366 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4367 u8 *data)
4369 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4370 u16 type, result;
4372 if (cmd_len < sizeof(*rsp))
4373 return -EPROTO;
4375 type = __le16_to_cpu(rsp->type);
4376 result = __le16_to_cpu(rsp->result);
4378 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4380 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4381 if (cmd->ident != conn->info_ident ||
4382 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4383 return 0;
4385 cancel_delayed_work(&conn->info_timer);
4387 if (result != L2CAP_IR_SUCCESS) {
4388 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4389 conn->info_ident = 0;
4391 l2cap_conn_start(conn);
4393 return 0;
4396 switch (type) {
4397 case L2CAP_IT_FEAT_MASK:
4398 conn->feat_mask = get_unaligned_le32(rsp->data);
4400 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4401 struct l2cap_info_req req;
4402 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4404 conn->info_ident = l2cap_get_ident(conn);
4406 l2cap_send_cmd(conn, conn->info_ident,
4407 L2CAP_INFO_REQ, sizeof(req), &req);
4408 } else {
4409 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4410 conn->info_ident = 0;
4412 l2cap_conn_start(conn);
4414 break;
4416 case L2CAP_IT_FIXED_CHAN:
4417 conn->remote_fixed_chan = rsp->data[0];
4418 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4419 conn->info_ident = 0;
4421 l2cap_conn_start(conn);
4422 break;
4425 return 0;
4428 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4429 struct l2cap_cmd_hdr *cmd,
4430 u16 cmd_len, void *data)
4432 struct l2cap_create_chan_req *req = data;
4433 struct l2cap_create_chan_rsp rsp;
4434 struct l2cap_chan *chan;
4435 struct hci_dev *hdev;
4436 u16 psm, scid;
4438 if (cmd_len != sizeof(*req))
4439 return -EPROTO;
4441 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4442 return -EINVAL;
4444 psm = le16_to_cpu(req->psm);
4445 scid = le16_to_cpu(req->scid);
4447 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4449 /* For controller id 0 make BR/EDR connection */
4450 if (req->amp_id == AMP_ID_BREDR) {
4451 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4452 req->amp_id);
4453 return 0;
4456 /* Validate AMP controller id */
4457 hdev = hci_dev_get(req->amp_id);
4458 if (!hdev)
4459 goto error;
4461 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4462 hci_dev_put(hdev);
4463 goto error;
4466 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4467 req->amp_id);
4468 if (chan) {
4469 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4470 struct hci_conn *hs_hcon;
4472 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4473 &conn->hcon->dst);
4474 if (!hs_hcon) {
4475 hci_dev_put(hdev);
4476 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4477 chan->dcid);
4478 return 0;
4481 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4483 mgr->bredr_chan = chan;
4484 chan->hs_hcon = hs_hcon;
4485 chan->fcs = L2CAP_FCS_NONE;
4486 conn->mtu = hdev->block_mtu;
4489 hci_dev_put(hdev);
4491 return 0;
4493 error:
4494 rsp.dcid = 0;
4495 rsp.scid = cpu_to_le16(scid);
4496 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4497 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4499 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4500 sizeof(rsp), &rsp);
4502 return 0;
4505 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4507 struct l2cap_move_chan_req req;
4508 u8 ident;
4510 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4512 ident = l2cap_get_ident(chan->conn);
4513 chan->ident = ident;
4515 req.icid = cpu_to_le16(chan->scid);
4516 req.dest_amp_id = dest_amp_id;
4518 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4519 &req);
4521 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4524 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4526 struct l2cap_move_chan_rsp rsp;
4528 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4530 rsp.icid = cpu_to_le16(chan->dcid);
4531 rsp.result = cpu_to_le16(result);
4533 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4534 sizeof(rsp), &rsp);
4537 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4539 struct l2cap_move_chan_cfm cfm;
4541 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4543 chan->ident = l2cap_get_ident(chan->conn);
4545 cfm.icid = cpu_to_le16(chan->scid);
4546 cfm.result = cpu_to_le16(result);
4548 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4549 sizeof(cfm), &cfm);
4551 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4554 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4556 struct l2cap_move_chan_cfm cfm;
4558 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4560 cfm.icid = cpu_to_le16(icid);
4561 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4563 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4564 sizeof(cfm), &cfm);
4567 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4568 u16 icid)
4570 struct l2cap_move_chan_cfm_rsp rsp;
4572 BT_DBG("icid 0x%4.4x", icid);
4574 rsp.icid = cpu_to_le16(icid);
4575 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4578 static void __release_logical_link(struct l2cap_chan *chan)
4580 chan->hs_hchan = NULL;
4581 chan->hs_hcon = NULL;
4583 /* Placeholder - release the logical link */
4586 static void l2cap_logical_fail(struct l2cap_chan *chan)
4588 /* Logical link setup failed */
4589 if (chan->state != BT_CONNECTED) {
4590 /* Create channel failure, disconnect */
4591 l2cap_send_disconn_req(chan, ECONNRESET);
4592 return;
4595 switch (chan->move_role) {
4596 case L2CAP_MOVE_ROLE_RESPONDER:
4597 l2cap_move_done(chan);
4598 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4599 break;
4600 case L2CAP_MOVE_ROLE_INITIATOR:
4601 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4602 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4603 /* Remote has only sent pending or
4604 * success responses, clean up
4606 l2cap_move_done(chan);
4609 /* Other amp move states imply that the move
4610 * has already aborted
4612 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4613 break;
4617 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4618 struct hci_chan *hchan)
4620 struct l2cap_conf_rsp rsp;
4622 chan->hs_hchan = hchan;
4623 chan->hs_hcon->l2cap_data = chan->conn;
4625 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4627 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4628 int err;
4630 set_default_fcs(chan);
4632 err = l2cap_ertm_init(chan);
4633 if (err < 0)
4634 l2cap_send_disconn_req(chan, -err);
4635 else
4636 l2cap_chan_ready(chan);
4640 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4641 struct hci_chan *hchan)
4643 chan->hs_hcon = hchan->conn;
4644 chan->hs_hcon->l2cap_data = chan->conn;
4646 BT_DBG("move_state %d", chan->move_state);
4648 switch (chan->move_state) {
4649 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4650 /* Move confirm will be sent after a success
4651 * response is received
4653 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4654 break;
4655 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4656 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4657 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4658 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4659 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4660 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4661 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4662 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4663 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4665 break;
4666 default:
4667 /* Move was not in expected state, free the channel */
4668 __release_logical_link(chan);
4670 chan->move_state = L2CAP_MOVE_STABLE;
4674 /* Call with chan locked */
4675 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4676 u8 status)
4678 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4680 if (status) {
4681 l2cap_logical_fail(chan);
4682 __release_logical_link(chan);
4683 return;
4686 if (chan->state != BT_CONNECTED) {
4687 /* Ignore logical link if channel is on BR/EDR */
4688 if (chan->local_amp_id != AMP_ID_BREDR)
4689 l2cap_logical_finish_create(chan, hchan);
4690 } else {
4691 l2cap_logical_finish_move(chan, hchan);
4695 void l2cap_move_start(struct l2cap_chan *chan)
4697 BT_DBG("chan %p", chan);
4699 if (chan->local_amp_id == AMP_ID_BREDR) {
4700 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4701 return;
4702 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4703 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4704 /* Placeholder - start physical link setup */
4705 } else {
4706 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4707 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4708 chan->move_id = 0;
4709 l2cap_move_setup(chan);
4710 l2cap_send_move_chan_req(chan, 0);
4714 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4715 u8 local_amp_id, u8 remote_amp_id)
4717 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4718 local_amp_id, remote_amp_id);
4720 chan->fcs = L2CAP_FCS_NONE;
4722 /* Outgoing channel on AMP */
4723 if (chan->state == BT_CONNECT) {
4724 if (result == L2CAP_CR_SUCCESS) {
4725 chan->local_amp_id = local_amp_id;
4726 l2cap_send_create_chan_req(chan, remote_amp_id);
4727 } else {
4728 /* Revert to BR/EDR connect */
4729 l2cap_send_conn_req(chan);
4732 return;
4735 /* Incoming channel on AMP */
4736 if (__l2cap_no_conn_pending(chan)) {
4737 struct l2cap_conn_rsp rsp;
4738 char buf[128];
4739 rsp.scid = cpu_to_le16(chan->dcid);
4740 rsp.dcid = cpu_to_le16(chan->scid);
4742 if (result == L2CAP_CR_SUCCESS) {
4743 /* Send successful response */
4744 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4745 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4746 } else {
4747 /* Send negative response */
4748 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4749 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4752 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4753 sizeof(rsp), &rsp);
4755 if (result == L2CAP_CR_SUCCESS) {
4756 l2cap_state_change(chan, BT_CONFIG);
4757 set_bit(CONF_REQ_SENT, &chan->conf_state);
4758 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4759 L2CAP_CONF_REQ,
4760 l2cap_build_conf_req(chan, buf), buf);
4761 chan->num_conf_req++;
4766 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4767 u8 remote_amp_id)
4769 l2cap_move_setup(chan);
4770 chan->move_id = local_amp_id;
4771 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4773 l2cap_send_move_chan_req(chan, remote_amp_id);
4776 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4778 struct hci_chan *hchan = NULL;
4780 /* Placeholder - get hci_chan for logical link */
4782 if (hchan) {
4783 if (hchan->state == BT_CONNECTED) {
4784 /* Logical link is ready to go */
4785 chan->hs_hcon = hchan->conn;
4786 chan->hs_hcon->l2cap_data = chan->conn;
4787 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4788 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4790 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4791 } else {
4792 /* Wait for logical link to be ready */
4793 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4795 } else {
4796 /* Logical link not available */
4797 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4801 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4803 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4804 u8 rsp_result;
4805 if (result == -EINVAL)
4806 rsp_result = L2CAP_MR_BAD_ID;
4807 else
4808 rsp_result = L2CAP_MR_NOT_ALLOWED;
4810 l2cap_send_move_chan_rsp(chan, rsp_result);
4813 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4814 chan->move_state = L2CAP_MOVE_STABLE;
4816 /* Restart data transmission */
4817 l2cap_ertm_send(chan);
4820 /* Invoke with locked chan */
4821 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4823 u8 local_amp_id = chan->local_amp_id;
4824 u8 remote_amp_id = chan->remote_amp_id;
4826 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4827 chan, result, local_amp_id, remote_amp_id);
4829 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4830 l2cap_chan_unlock(chan);
4831 return;
4834 if (chan->state != BT_CONNECTED) {
4835 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4836 } else if (result != L2CAP_MR_SUCCESS) {
4837 l2cap_do_move_cancel(chan, result);
4838 } else {
4839 switch (chan->move_role) {
4840 case L2CAP_MOVE_ROLE_INITIATOR:
4841 l2cap_do_move_initiate(chan, local_amp_id,
4842 remote_amp_id);
4843 break;
4844 case L2CAP_MOVE_ROLE_RESPONDER:
4845 l2cap_do_move_respond(chan, result);
4846 break;
4847 default:
4848 l2cap_do_move_cancel(chan, result);
4849 break;
4854 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4855 struct l2cap_cmd_hdr *cmd,
4856 u16 cmd_len, void *data)
4858 struct l2cap_move_chan_req *req = data;
4859 struct l2cap_move_chan_rsp rsp;
4860 struct l2cap_chan *chan;
4861 u16 icid = 0;
4862 u16 result = L2CAP_MR_NOT_ALLOWED;
4864 if (cmd_len != sizeof(*req))
4865 return -EPROTO;
4867 icid = le16_to_cpu(req->icid);
4869 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4871 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4872 return -EINVAL;
4874 chan = l2cap_get_chan_by_dcid(conn, icid);
4875 if (!chan) {
4876 rsp.icid = cpu_to_le16(icid);
4877 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4878 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4879 sizeof(rsp), &rsp);
4880 return 0;
4883 chan->ident = cmd->ident;
4885 if (chan->scid < L2CAP_CID_DYN_START ||
4886 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4887 (chan->mode != L2CAP_MODE_ERTM &&
4888 chan->mode != L2CAP_MODE_STREAMING)) {
4889 result = L2CAP_MR_NOT_ALLOWED;
4890 goto send_move_response;
4893 if (chan->local_amp_id == req->dest_amp_id) {
4894 result = L2CAP_MR_SAME_ID;
4895 goto send_move_response;
4898 if (req->dest_amp_id != AMP_ID_BREDR) {
4899 struct hci_dev *hdev;
4900 hdev = hci_dev_get(req->dest_amp_id);
4901 if (!hdev || hdev->dev_type != HCI_AMP ||
4902 !test_bit(HCI_UP, &hdev->flags)) {
4903 if (hdev)
4904 hci_dev_put(hdev);
4906 result = L2CAP_MR_BAD_ID;
4907 goto send_move_response;
4909 hci_dev_put(hdev);
4912 /* Detect a move collision. Only send a collision response
4913 * if this side has "lost", otherwise proceed with the move.
4914 * The winner has the larger bd_addr.
4916 if ((__chan_is_moving(chan) ||
4917 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4918 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4919 result = L2CAP_MR_COLLISION;
4920 goto send_move_response;
4923 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4924 l2cap_move_setup(chan);
4925 chan->move_id = req->dest_amp_id;
4926 icid = chan->dcid;
4928 if (req->dest_amp_id == AMP_ID_BREDR) {
4929 /* Moving to BR/EDR */
4930 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4931 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4932 result = L2CAP_MR_PEND;
4933 } else {
4934 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4935 result = L2CAP_MR_SUCCESS;
4937 } else {
4938 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4939 /* Placeholder - uncomment when amp functions are available */
4940 /*amp_accept_physical(chan, req->dest_amp_id);*/
4941 result = L2CAP_MR_PEND;
4944 send_move_response:
4945 l2cap_send_move_chan_rsp(chan, result);
4947 l2cap_chan_unlock(chan);
4949 return 0;
/* Continue an in-progress AMP channel move after the remote replied to our
 * Move Channel Request with SUCCESS or PEND: advance chan->move_state and
 * send a Move Channel Confirmation when the logical link state allows it.
 *
 * l2cap_get_chan_by_scid() returns the channel locked (note the bare
 * l2cap_chan_unlock() at the end of this function).
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Unknown channel - confirm with icid only, per spec */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Remote needs more time; re-arm the extended response timer */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			/* Defer confirmation until local busy clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
/* Handle a failed Move Channel Response: cancel or hand over the move
 * depending on the result code, then confirm the (unsuccessful) move to
 * the remote.
 *
 * l2cap_get_chan_by_ident() returns the channel locked; it is unlocked
 * before returning.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Both sides initiated a move; let the remote lead */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5071 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5072 struct l2cap_cmd_hdr *cmd,
5073 u16 cmd_len, void *data)
5075 struct l2cap_move_chan_rsp *rsp = data;
5076 u16 icid, result;
5078 if (cmd_len != sizeof(*rsp))
5079 return -EPROTO;
5081 icid = le16_to_cpu(rsp->icid);
5082 result = le16_to_cpu(rsp->result);
5084 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5086 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5087 l2cap_move_continue(conn, icid, result);
5088 else
5089 l2cap_move_fail(conn, cmd->ident, icid, result);
5091 return 0;
/* Handle an incoming Move Channel Confirmation: commit or roll back the
 * controller id for the channel, and always answer with a Move Channel
 * Confirmation Response (even for an unknown icid, as the spec requires).
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; it is unlocked
 * before returning.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move confirmed: adopt the new controller id */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				/* Back on BR/EDR - AMP logical link no
				 * longer needed
				 */
				__release_logical_link(chan);
		} else {
			/* Move rejected: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle an incoming Move Channel Confirmation Response: the remote has
 * acknowledged our confirmation, so finalize the move if we were waiting
 * for exactly this response.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; it is unlocked
 * before returning.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		/* Unknown icid - nothing to do, no response defined */
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		/* Commit the new controller id */
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR - drop the AMP logical link if held */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle an LE Connection Parameter Update Request from the slave:
 * validate the proposed parameters, always send accept/reject response
 * first, and only then apply the update on the controller and notify
 * userspace via mgmt.
 *
 * Returns 0 when handled, -EINVAL when we are not master (caller will
 * send a command reject), -EPROTO on a malformed PDU.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may accept parameter updates over L2CAP */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* Respond before actually changing the connection parameters */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		/* Let userspace decide whether to persist these parameters */
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
/* Handle an LE Credit Based Connection Response for a connection we
 * initiated: on success, record the remote's channel parameters and mark
 * the channel ready; on an authentication/encryption failure, try to
 * elevate security and re-send the request later; otherwise tear the
 * channel down.
 *
 * Returns 0 when the response matched a pending channel, -EBADSLT when
 * no channel matched cmd->ident, -EPROTO on a malformed PDU.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the minimum MTU/MPS for LE CoC (matches the check in
	 * l2cap_le_connect_req())
	 */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* The pending channel is found by the ident of our request */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Bump the requested security one level above the
		 * current link security and retry pairing.
		 */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Only handlers whose failure should make the caller emit a Command
 * Reject propagate their return value through err; responses and other
 * PDUs where a reject would be wrong have their return value
 * intentionally dropped.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the received payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
/* Handle an incoming LE Credit Based Connection Request: find a listening
 * channel for the PSM, verify security and cid uniqueness, create and add
 * the new channel (or defer it to userspace), and send the LE Connection
 * Response unless setup was deferred.
 *
 * Returns 0 when a response was sent (or deferred), -EPROTO on a
 * malformed PDU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS allowed for LE CoC */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	/* Our local cid and initial credits go back in the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	/* Drop the reference taken by l2cap_global_chan_by_psm() */
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
/* Handle an LE Flow Control Credit PDU: add the granted credits to the
 * channel's TX budget (disconnecting on overflow, as required for LE CoC),
 * flush as many queued frames as the new budget allows, and resume a
 * suspended sender if credits remain.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; it is unlocked on
 * every exit path below.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The total credit count must never exceed 65535 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Each queued frame consumes one credit */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	/* Wake a sender that was blocked waiting for credits */
	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5548 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5549 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5550 u8 *data)
5552 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5553 struct l2cap_chan *chan;
5555 if (cmd_len < sizeof(*rej))
5556 return -EPROTO;
5558 mutex_lock(&conn->chan_lock);
5560 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5561 if (!chan)
5562 goto done;
5564 l2cap_chan_lock(chan);
5565 l2cap_chan_del(chan, ECONNREFUSED);
5566 l2cap_chan_unlock(chan);
5568 done:
5569 mutex_unlock(&conn->chan_lock);
5570 return 0;
5573 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5574 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5575 u8 *data)
5577 int err = 0;
5579 switch (cmd->code) {
5580 case L2CAP_COMMAND_REJ:
5581 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5582 break;
5584 case L2CAP_CONN_PARAM_UPDATE_REQ:
5585 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5586 break;
5588 case L2CAP_CONN_PARAM_UPDATE_RSP:
5589 break;
5591 case L2CAP_LE_CONN_RSP:
5592 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5593 break;
5595 case L2CAP_LE_CONN_REQ:
5596 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5597 break;
5599 case L2CAP_LE_CREDITS:
5600 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5601 break;
5603 case L2CAP_DISCONN_REQ:
5604 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5605 break;
5607 case L2CAP_DISCONN_RSP:
5608 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5609 break;
5611 default:
5612 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5613 err = -EINVAL;
5614 break;
5617 return err;
/* Process an skb received on the LE signaling channel: it carries exactly
 * one command. Validate the header, dispatch it, and send a Command
 * Reject if the handler returned an error. Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling only makes sense on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The declared length must match the payload exactly, and an
	 * ident of 0 is invalid for signaling.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
/* Process an skb received on the BR/EDR signaling channel: it may carry
 * several commands back to back. Parse and dispatch each in order,
 * sending a Command Reject for any handler error, and stop at the first
 * corrupted header. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (e.g. monitoring) sockets a copy first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		/* Copy the header out - it may be unaligned in the buffer */
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A command cannot claim more payload than remains, and an
		 * ident of 0 is invalid for signaling.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the same skb */
		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5710 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5712 u16 our_fcs, rcv_fcs;
5713 int hdr_size;
5715 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5716 hdr_size = L2CAP_EXT_HDR_SIZE;
5717 else
5718 hdr_size = L2CAP_ENH_HDR_SIZE;
5720 if (chan->fcs == L2CAP_FCS_CRC16) {
5721 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5722 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5723 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5725 if (our_fcs != rcv_fcs)
5726 return -EBADMSG;
5728 return 0;
/* Respond to a poll (P-bit) from the remote: send RNR if we are locally
 * busy, flush any pending I-frames, and make sure the final (F-bit) goes
 * out - in an RR S-frame if nothing else carried it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		/* Tell the remote to hold off while we are busy */
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote busy just cleared - restart the retransmission timer if
	 * frames are still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	/* l2cap_send_sframe()/l2cap_ertm_send() clear CONN_SEND_FBIT when
	 * they transmit the F-bit; if it is still set, nothing carried it.
	 */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
/* Append new_frag to skb's fragment list, tracking the list tail through
 * *last_frag so appends stay O(1), and keep skb's length/size accounting
 * consistent with the added fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		/* First fragment - start the frag list */
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
/* Reassemble a segmented SDU from an incoming I-frame according to its
 * SAR bits, delivering complete SDUs via chan->ops->recv().
 *
 * Ownership: when a fragment is kept for later (skb assigned to chan->sdu
 * or appended to it) the local skb pointer is set to NULL so the error
 * path below does not free it. On any error, both the offending skb and
 * any partially assembled SDU are freed and reassembly state is reset.
 *
 * Returns 0 on success/partial progress, -EINVAL on a SAR protocol
 * violation, -EMSGSIZE when the announced SDU exceeds our MTU, or the
 * error from chan->ops->recv().
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented SDU must not arrive mid-reassembly */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU must not start while one is in progress */
		if (chan->sdu)
			break;

		/* First two bytes of a START segment carry the SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START segment may not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Fragment kept - don't free it in the error path */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* CONTINUE is only valid mid-reassembly */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE segment must not complete (or overrun) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* END is only valid mid-reassembly */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The END segment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
/* Stub: re-segmentation of outstanding frames after an AMP channel move
 * is not implemented yet, so this unconditionally reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5872 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5874 u8 event;
5876 if (chan->mode != L2CAP_MODE_ERTM)
5877 return;
5879 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5880 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue after missing frames have been retransmitted:
 * deliver frames in sequence order until a gap (or local busy) stops us,
 * and return to the normal RECV state once the queue is empty.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		/* Look for the next in-order frame */
		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled - resume normal reception and ack */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
/* Handle a received SREJ S-frame: retransmit the single requested frame
 * (subject to the retry limit), honouring the ERTM P/F-bit rules that
 * decide whether the retransmission also carries the final bit or must
 * be suppressed as already-answered.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll set: our retransmission must carry the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			/* Remember this SREJ so a duplicate with the F-bit
			 * isn't retransmitted again.
			 */
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmission only if this is the
			 * F-bit answer to the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onwards (subject to the retry limit), following the ERTM F-bit rules
 * that prevent retransmitting frames a previous REJ already covered.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F-bit REJ wasn't already
		 * answered by an earlier REJ action.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Classify the TxSeq of a received I-frame relative to the receive
 * window and any outstanding SREJ state, returning one of the
 * L2CAP_TXSEQ_* classifications that drive the ERTM RX state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq falls before expected_tx_seq (modulo the sequence space),
	 * so this frame was already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
/* ERTM RX state machine handler for the normal RECV state: process an
 * incoming I- or S-frame event, delivering in-sequence data, starting
 * SREJ recovery on a sequence gap, and driving retransmission on
 * RR/REJ/SREJ/RNR.
 *
 * Ownership: skb_in_use marks that the skb was handed off (delivered or
 * queued); otherwise it is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Locally busy: drop the frame; it will appear
			 * missing (and be recovered) once busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu() takes ownership of skb */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					/* Unsolicited F-bit: treat as an ack
					 * and resend the rest.
					 */
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only its ack info matters */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				/* Unsolicited F-bit: resend unacked frames */
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			/* Poll demands an F-bit answer even while busy */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
/* ERTM RX state machine handler for the SREJ_SENT state (selective
 * retransmission in progress): queue arriving frames - both fresh ones
 * and SREJ'd retransmissions - until every gap is filled, issuing
 * further SREJs as needed, then deliver them via
 * l2cap_rx_queued_iframes().
 *
 * Ownership: skb_in_use marks that the skb was queued; otherwise it is
 * freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first arrived -
			 * remove it from the outstanding-SREJ list.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Try to deliver whatever is now in sequence */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				/* Unsolicited F-bit: resend unacked frames */
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll while re-requesting the newest gap */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6375 static int l2cap_finish_move(struct l2cap_chan *chan)
6377 BT_DBG("chan %p", chan);
6379 chan->rx_state = L2CAP_RX_STATE_RECV;
6381 if (chan->hs_hcon)
6382 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6383 else
6384 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6386 return l2cap_resegment(chan);
6389 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6390 struct l2cap_ctrl *control,
6391 struct sk_buff *skb, u8 event)
6393 int err;
6395 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6396 event);
6398 if (!control->poll)
6399 return -EPROTO;
6401 l2cap_process_reqseq(chan, control->reqseq);
6403 if (!skb_queue_empty(&chan->tx_q))
6404 chan->tx_send_head = skb_peek(&chan->tx_q);
6405 else
6406 chan->tx_send_head = NULL;
6408 /* Rewind next_tx_seq to the point expected
6409 * by the receiver.
6411 chan->next_tx_seq = control->reqseq;
6412 chan->unacked_frames = 0;
6414 err = l2cap_finish_move(chan);
6415 if (err)
6416 return err;
6418 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6419 l2cap_send_i_or_rr_or_rnr(chan);
6421 if (event == L2CAP_EV_RECV_IFRAME)
6422 return -EPROTO;
6424 return l2cap_rx_state_recv(chan, control, NULL, event);
6427 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6428 struct l2cap_ctrl *control,
6429 struct sk_buff *skb, u8 event)
6431 int err;
6433 if (!control->final)
6434 return -EPROTO;
6436 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6438 chan->rx_state = L2CAP_RX_STATE_RECV;
6439 l2cap_process_reqseq(chan, control->reqseq);
6441 if (!skb_queue_empty(&chan->tx_q))
6442 chan->tx_send_head = skb_peek(&chan->tx_q);
6443 else
6444 chan->tx_send_head = NULL;
6446 /* Rewind next_tx_seq to the point expected
6447 * by the receiver.
6449 chan->next_tx_seq = control->reqseq;
6450 chan->unacked_frames = 0;
6452 if (chan->hs_hcon)
6453 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6454 else
6455 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6457 err = l2cap_resegment(chan);
6459 if (!err)
6460 err = l2cap_rx_state_recv(chan, control, skb, event);
6462 return err;
6465 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6467 /* Make sure reqseq is for a packet that has been sent but not acked */
6468 u16 unacked;
6470 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6471 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6474 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6475 struct sk_buff *skb, u8 event)
6477 int err = 0;
6479 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6480 control, skb, event, chan->rx_state);
6482 if (__valid_reqseq(chan, control->reqseq)) {
6483 switch (chan->rx_state) {
6484 case L2CAP_RX_STATE_RECV:
6485 err = l2cap_rx_state_recv(chan, control, skb, event);
6486 break;
6487 case L2CAP_RX_STATE_SREJ_SENT:
6488 err = l2cap_rx_state_srej_sent(chan, control, skb,
6489 event);
6490 break;
6491 case L2CAP_RX_STATE_WAIT_P:
6492 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6493 break;
6494 case L2CAP_RX_STATE_WAIT_F:
6495 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6496 break;
6497 default:
6498 /* shut it down */
6499 break;
6501 } else {
6502 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6503 control->reqseq, chan->next_tx_seq,
6504 chan->expected_ack_seq);
6505 l2cap_send_disconn_req(chan, ECONNRESET);
6508 return err;
6511 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6512 struct sk_buff *skb)
6514 int err = 0;
6516 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6517 chan->rx_state);
6519 if (l2cap_classify_txseq(chan, control->txseq) ==
6520 L2CAP_TXSEQ_EXPECTED) {
6521 l2cap_pass_to_tx(chan, control);
6523 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6524 __next_seq(chan, chan->buffer_seq));
6526 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6528 l2cap_reassemble_sdu(chan, skb, control);
6529 } else {
6530 if (chan->sdu) {
6531 kfree_skb(chan->sdu);
6532 chan->sdu = NULL;
6534 chan->sdu_last_frag = NULL;
6535 chan->sdu_len = 0;
6537 if (skb) {
6538 BT_DBG("Freeing %p", skb);
6539 kfree_skb(skb);
6543 chan->last_acked_seq = control->txseq;
6544 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6546 return err;
/* Entry point for ERTM and streaming-mode PDUs on a connected channel.
 * Unpacks the control field, verifies FCS and payload length, then
 * dispatches: I-frames go to the mode-specific receive path, S-frames
 * feed the ERTM state machine.  The skb is always consumed (queued,
 * delivered or freed); returns 0.
 */
6549 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6551 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6552 u16 len;
6553 u8 event;
6555 __unpack_control(chan, skb);
6557 len = skb->len;
/* Corrupted frame (bad FCS):
6560 * We can just drop the corrupted I-frame here.
6561 * Receiver will miss it and start proper recovery
6562 * procedures and ask for retransmission.
 */
6564 if (l2cap_check_fcs(chan, skb))
6565 goto drop;
/* SDU-length header and FCS trailer do not count against the MPS */
6567 if (!control->sframe && control->sar == L2CAP_SAR_START)
6568 len -= L2CAP_SDULEN_SIZE;
6570 if (chan->fcs == L2CAP_FCS_CRC16)
6571 len -= L2CAP_FCS_SIZE;
6573 if (len > chan->mps) {
6574 l2cap_send_disconn_req(chan, ECONNRESET);
6575 goto drop;
6578 if (!control->sframe) {
6579 int err;
6581 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6582 control->sar, control->reqseq, control->final,
6583 control->txseq);
6585 /* Validate F-bit - F=0 always valid, F=1 only
6586 * valid in TX WAIT_F
6588 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6589 goto drop;
6591 if (chan->mode != L2CAP_MODE_STREAMING) {
6592 event = L2CAP_EV_RECV_IFRAME;
6593 err = l2cap_rx(chan, control, skb, event);
6594 } else {
6595 err = l2cap_stream_rx(chan, control, skb);
6598 if (err)
6599 l2cap_send_disconn_req(chan, ECONNRESET);
6600 } else {
/* Map the 2-bit super field onto the RX state-machine event */
6601 const u8 rx_func_to_event[4] = {
6602 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6603 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6606 /* Only I-frames are expected in streaming mode */
6607 if (chan->mode == L2CAP_MODE_STREAMING)
6608 goto drop;
6610 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6611 control->reqseq, control->final, control->poll,
6612 control->super);
/* S-frames carry no payload beyond control + FCS */
6614 if (len != 0) {
6615 BT_ERR("Trailing bytes: %d in sframe", len);
6616 l2cap_send_disconn_req(chan, ECONNRESET);
6617 goto drop;
6620 /* Validate F and P bits */
6621 if (control->final && (control->poll ||
6622 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6623 goto drop;
6625 event = rx_func_to_event[control->super];
6626 if (l2cap_rx(chan, control, skb, event))
6627 l2cap_send_disconn_req(chan, ECONNRESET);
6630 return 0;
6632 drop:
6633 kfree_skb(skb);
6634 return 0;
6637 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6639 struct l2cap_conn *conn = chan->conn;
6640 struct l2cap_le_credits pkt;
6641 u16 return_credits;
6643 /* We return more credits to the sender only after the amount of
6644 * credits falls below half of the initial amount.
6646 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6647 return;
6649 return_credits = le_max_credits - chan->rx_credits;
6651 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6653 chan->rx_credits += return_credits;
6655 pkt.cid = cpu_to_le16(chan->scid);
6656 pkt.credits = cpu_to_le16(return_credits);
6658 chan->ident = l2cap_get_ident(conn);
6660 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive one LE credit-based flow-control PDU.  Consumes one credit,
 * reassembles SDU fragments (the first PDU carries a 2-byte SDU length)
 * and hands complete SDUs to chan->ops->recv().  Once reassembly has
 * started the skb is owned and freed here, so 0 is returned even on
 * internal errors; a negative return tells the caller to drop the skb.
 */
6663 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6665 int err;
/* Peer sent data without credit: protocol violation, disconnect */
6667 if (!chan->rx_credits) {
6668 BT_ERR("No credits to receive LE L2CAP data");
6669 l2cap_send_disconn_req(chan, ECONNRESET);
6670 return -ENOBUFS;
6673 if (chan->imtu < skb->len) {
6674 BT_ERR("Too big LE L2CAP PDU");
6675 return -ENOBUFS;
/* One credit per received PDU; replenish the peer when we run low */
6678 chan->rx_credits--;
6679 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6681 l2cap_chan_le_send_credits(chan);
6683 err = 0;
/* No SDU in progress: this PDU starts one (first 2 bytes = SDU len) */
6685 if (!chan->sdu) {
6686 u16 sdu_len;
6688 sdu_len = get_unaligned_le16(skb->data);
6689 skb_pull(skb, L2CAP_SDULEN_SIZE);
6691 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6692 sdu_len, skb->len, chan->imtu);
6694 if (sdu_len > chan->imtu) {
6695 BT_ERR("Too big LE L2CAP SDU length received");
6696 err = -EMSGSIZE;
6697 goto failed;
6700 if (skb->len > sdu_len) {
6701 BT_ERR("Too much LE L2CAP data received");
6702 err = -EINVAL;
6703 goto failed;
/* Unfragmented SDU: deliver directly, ownership passes to recv() */
6706 if (skb->len == sdu_len)
6707 return chan->ops->recv(chan, skb);
6709 chan->sdu = skb;
6710 chan->sdu_len = sdu_len;
6711 chan->sdu_last_frag = skb;
6713 return 0;
6716 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6717 chan->sdu->len, skb->len, chan->sdu_len);
6719 if (chan->sdu->len + skb->len > chan->sdu_len) {
6720 BT_ERR("Too much LE L2CAP data received");
6721 err = -EINVAL;
6722 goto failed;
/* append_skb_frag takes ownership of skb; NULL it so the error path
 * below cannot double-free it
 */
6725 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6726 skb = NULL;
6728 if (chan->sdu->len == chan->sdu_len) {
6729 err = chan->ops->recv(chan, chan->sdu);
6730 if (!err) {
6731 chan->sdu = NULL;
6732 chan->sdu_last_frag = NULL;
6733 chan->sdu_len = 0;
6737 failed:
6738 if (err) {
6739 kfree_skb(skb);
6740 kfree_skb(chan->sdu);
6741 chan->sdu = NULL;
6742 chan->sdu_last_frag = NULL;
6743 chan->sdu_len = 0;
6746 /* We can't return an error here since we took care of the skb
6747 * freeing internally. An error return would cause the caller to
6748 * do a double-free of the skb.
6750 return 0;
/* Route an incoming PDU on a dynamic CID to its owning channel and
 * deliver it according to the channel mode.  Unknown CIDs are dropped,
 * except L2CAP_CID_A2MP which may create an AMP manager channel on
 * demand.
 * NOTE(review): the unlock at "done" suggests l2cap_get_chan_by_scid()
 * returns with the channel lock held — confirm against its definition.
 */
6753 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6754 struct sk_buff *skb)
6756 struct l2cap_chan *chan;
6758 chan = l2cap_get_chan_by_scid(conn, cid);
6759 if (!chan) {
6760 if (cid == L2CAP_CID_A2MP) {
6761 chan = a2mp_channel_create(conn, skb);
6762 if (!chan) {
6763 kfree_skb(skb);
6764 return;
6767 l2cap_chan_lock(chan);
6768 } else {
6769 BT_DBG("unknown cid 0x%4.4x", cid);
6770 /* Drop packet and return */
6771 kfree_skb(skb);
6772 return;
6776 BT_DBG("chan %p, len %d", chan, skb->len);
6778 if (chan->state != BT_CONNECTED)
6779 goto drop;
6781 switch (chan->mode) {
6782 case L2CAP_MODE_LE_FLOWCTL:
6783 if (l2cap_le_data_rcv(chan, skb) < 0)
6784 goto drop;
6786 goto done;
6788 case L2CAP_MODE_BASIC:
6789 /* If socket recv buffers overflows we drop data here
6790 * which is *bad* because L2CAP has to be reliable.
6791 * But we don't have any other choice. L2CAP doesn't
6792 * provide flow control mechanism. */
6794 if (chan->imtu < skb->len) {
6795 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6796 goto drop;
/* recv() returning 0 means the skb is now owned by the channel */
6799 if (!chan->ops->recv(chan, skb))
6800 goto done;
6801 break;
6803 case L2CAP_MODE_ERTM:
6804 case L2CAP_MODE_STREAMING:
6805 l2cap_data_rcv(chan, skb);
6806 goto done;
6808 default:
6809 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6810 break;
6813 drop:
6814 kfree_skb(skb);
6816 done:
6817 l2cap_chan_unlock(chan);
/* Deliver a connectionless (group) PDU to a listener matching the PSM.
 * Only valid on ACL links.  The channel reference taken by
 * l2cap_global_chan_by_psm() is dropped on every path, and the skb is
 * freed unless ops->recv() accepts it.
 */
6820 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6821 struct sk_buff *skb)
6823 struct hci_conn *hcon = conn->hcon;
6824 struct l2cap_chan *chan;
6826 if (hcon->type != ACL_LINK)
6827 goto free_skb;
6829 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6830 ACL_LINK);
6831 if (!chan)
6832 goto free_skb;
6834 BT_DBG("chan %p, len %d", chan, skb->len);
6836 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6837 goto drop;
6839 if (chan->imtu < skb->len)
6840 goto drop;
6842 /* Store remote BD_ADDR and PSM for msg_name */
6843 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6844 bt_cb(skb)->psm = psm;
/* recv() returning 0 means it took ownership of the skb */
6846 if (!chan->ops->recv(chan, skb)) {
6847 l2cap_chan_put(chan);
6848 return;
6851 drop:
6852 l2cap_chan_put(chan);
6853 free_skb:
6854 kfree_skb(skb);
/* Demultiplex one complete L2CAP frame by CID: signaling, LE
 * signaling, connectionless, or a data channel.  Frames arriving
 * before the HCI link is fully up are queued on conn->pending_rx and
 * replayed later by process_pending_rx().  Consumes the skb.
 */
6857 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6859 struct l2cap_hdr *lh = (void *) skb->data;
6860 struct hci_conn *hcon = conn->hcon;
6861 u16 cid, len;
6862 __le16 psm;
6864 if (hcon->state != BT_CONNECTED) {
6865 BT_DBG("queueing pending rx skb");
6866 skb_queue_tail(&conn->pending_rx, skb);
6867 return;
6870 skb_pull(skb, L2CAP_HDR_SIZE);
6871 cid = __le16_to_cpu(lh->cid);
6872 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly */
6874 if (len != skb->len) {
6875 kfree_skb(skb);
6876 return;
6879 /* Since we can't actively block incoming LE connections we must
6880 * at least ensure that we ignore incoming data from them.
6882 if (hcon->type == LE_LINK &&
6883 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6884 bdaddr_type(hcon, hcon->dst_type))) {
6885 kfree_skb(skb);
6886 return;
6889 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6891 switch (cid) {
6892 case L2CAP_CID_SIGNALING:
6893 l2cap_sig_channel(conn, skb);
6894 break;
6896 case L2CAP_CID_CONN_LESS:
6897 psm = get_unaligned((__le16 *) skb->data);
6898 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6899 l2cap_conless_channel(conn, psm, skb);
6900 break;
6902 case L2CAP_CID_LE_SIGNALING:
6903 l2cap_le_sig_channel(conn, skb);
6904 break;
6906 default:
6907 l2cap_data_channel(conn, cid, skb);
6908 break;
6912 static void process_pending_rx(struct work_struct *work)
6914 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6915 pending_rx_work);
6916 struct sk_buff *skb;
6918 BT_DBG("");
6920 while ((skb = skb_dequeue(&conn->pending_rx)))
6921 l2cap_recv_frame(conn, skb);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocate the structure, take references on the hci_conn and a new
 * hci_chan, pick the MTU from the controller and initialise locks,
 * lists, work items and the pending-rx queue.  Returns NULL on
 * allocation failure.
 */
6924 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6926 struct l2cap_conn *conn = hcon->l2cap_data;
6927 struct hci_chan *hchan;
6929 if (conn)
6930 return conn;
6932 hchan = hci_chan_create(hcon);
6933 if (!hchan)
6934 return NULL;
6936 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6937 if (!conn) {
6938 hci_chan_del(hchan);
6939 return NULL;
6942 kref_init(&conn->ref);
6943 hcon->l2cap_data = conn;
/* Hold the hci_conn for the lifetime of this l2cap_conn */
6944 conn->hcon = hci_conn_get(hcon);
6945 conn->hchan = hchan;
6947 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6949 switch (hcon->type) {
6950 case LE_LINK:
6951 if (hcon->hdev->le_mtu) {
6952 conn->mtu = hcon->hdev->le_mtu;
6953 break;
6955 /* fall through */
6956 default:
6957 conn->mtu = hcon->hdev->acl_mtu;
6958 break;
6961 conn->feat_mask = 0;
/* Advertise the fixed channels this host supports */
6963 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
6965 if (hcon->type == ACL_LINK &&
6966 test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
6967 conn->local_fixed_chan |= L2CAP_FC_A2MP;
6969 if (bredr_sc_enabled(hcon->hdev) &&
6970 test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
6971 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
6973 mutex_init(&conn->ident_lock);
6974 mutex_init(&conn->chan_lock);
6976 INIT_LIST_HEAD(&conn->chan_l);
6977 INIT_LIST_HEAD(&conn->users);
6979 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
6981 skb_queue_head_init(&conn->pending_rx);
6982 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6983 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
6985 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
6987 return conn;
6990 static bool is_valid_psm(u16 psm, u8 dst_type) {
6991 if (!psm)
6992 return false;
6994 if (bdaddr_type_is_le(dst_type))
6995 return (psm <= 0x00ff);
6997 /* PSM must be odd and lsb of upper byte must be 0 */
6998 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on a channel: validate
 * PSM/CID/mode against the destination address type, create or reuse
 * the HCI link (LE or ACL), attach the channel to the l2cap_conn and
 * start the connect state machine.  Returns 0 on success (or when a
 * connection attempt is already in progress) and a negative errno
 * otherwise.
 */
7001 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7002 bdaddr_t *dst, u8 dst_type)
7004 struct l2cap_conn *conn;
7005 struct hci_conn *hcon;
7006 struct hci_dev *hdev;
7007 int err;
7009 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7010 dst_type, __le16_to_cpu(psm));
7012 hdev = hci_get_route(dst, &chan->src);
7013 if (!hdev)
7014 return -EHOSTUNREACH;
7016 hci_dev_lock(hdev);
/* Either a valid PSM, a fixed CID, or a raw channel is required */
7018 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7019 chan->chan_type != L2CAP_CHAN_RAW) {
7020 err = -EINVAL;
7021 goto done;
7024 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7025 err = -EINVAL;
7026 goto done;
7029 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7030 err = -EINVAL;
7031 goto done;
7034 switch (chan->mode) {
7035 case L2CAP_MODE_BASIC:
7036 break;
7037 case L2CAP_MODE_LE_FLOWCTL:
7038 l2cap_le_flowctl_init(chan);
7039 break;
7040 case L2CAP_MODE_ERTM:
7041 case L2CAP_MODE_STREAMING:
7042 if (!disable_ertm)
7043 break;
7044 /* fall through */
7045 default:
7046 err = -EOPNOTSUPP;
7047 goto done;
7050 switch (chan->state) {
7051 case BT_CONNECT:
7052 case BT_CONNECT2:
7053 case BT_CONFIG:
7054 /* Already connecting */
7055 err = 0;
7056 goto done;
7058 case BT_CONNECTED:
7059 /* Already connected */
7060 err = -EISCONN;
7061 goto done;
7063 case BT_OPEN:
7064 case BT_BOUND:
7065 /* Can connect */
7066 break;
7068 default:
7069 err = -EBADFD;
7070 goto done;
7073 /* Set destination address and psm */
7074 bacpy(&chan->dst, dst);
7075 chan->dst_type = dst_type;
7077 chan->psm = psm;
7078 chan->dcid = cid;
7080 if (bdaddr_type_is_le(dst_type)) {
7081 u8 role;
7083 /* Convert from L2CAP channel address type to HCI address type
7085 if (dst_type == BDADDR_LE_PUBLIC)
7086 dst_type = ADDR_LE_DEV_PUBLIC;
7087 else
7088 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we must connect in the slave role */
7090 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7091 role = HCI_ROLE_SLAVE;
7092 else
7093 role = HCI_ROLE_MASTER;
7095 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7096 HCI_LE_CONN_TIMEOUT, role);
7097 } else {
7098 u8 auth_type = l2cap_get_auth_type(chan);
7099 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7102 if (IS_ERR(hcon)) {
7103 err = PTR_ERR(hcon);
7104 goto done;
7107 conn = l2cap_conn_add(hcon);
7108 if (!conn) {
7109 hci_conn_drop(hcon);
7110 err = -ENOMEM;
7111 goto done;
7114 mutex_lock(&conn->chan_lock);
7115 l2cap_chan_lock(chan);
/* Requested destination CID already in use on this link */
7117 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7118 hci_conn_drop(hcon);
7119 err = -EBUSY;
7120 goto chan_unlock;
7123 /* Update source addr of the socket */
7124 bacpy(&chan->src, &hcon->src);
7125 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7127 __l2cap_chan_add(conn, chan);
7129 /* l2cap_chan_add takes its own ref so we can drop this one */
7130 hci_conn_drop(hcon);
7132 l2cap_state_change(chan, BT_CONNECT);
7133 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7135 /* Release chan->sport so that it can be reused by other
7136 * sockets (as it's only used for listening sockets).
7138 write_lock(&chan_list_lock);
7139 chan->sport = 0;
7140 write_unlock(&chan_list_lock);
/* Link may already be up (reused hcon): advance immediately */
7142 if (hcon->state == BT_CONNECTED) {
7143 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7144 __clear_chan_timer(chan);
7145 if (l2cap_chan_check_security(chan, true))
7146 l2cap_state_change(chan, BT_CONNECTED);
7147 } else
7148 l2cap_do_start(chan);
7151 err = 0;
7153 chan_unlock:
7154 l2cap_chan_unlock(chan);
7155 mutex_unlock(&conn->chan_lock);
7156 done:
7157 hci_dev_unlock(hdev);
7158 hci_dev_put(hdev);
7159 return err;
7161 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7163 /* ---- L2CAP interface with lower layer (HCI) ---- */
7165 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7167 int exact = 0, lm1 = 0, lm2 = 0;
7168 struct l2cap_chan *c;
7170 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7172 /* Find listening sockets and check their link_mode */
7173 read_lock(&chan_list_lock);
7174 list_for_each_entry(c, &chan_list, global_l) {
7175 if (c->state != BT_LISTEN)
7176 continue;
7178 if (!bacmp(&c->src, &hdev->bdaddr)) {
7179 lm1 |= HCI_LM_ACCEPT;
7180 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7181 lm1 |= HCI_LM_MASTER;
7182 exact++;
7183 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7184 lm2 |= HCI_LM_ACCEPT;
7185 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7186 lm2 |= HCI_LM_MASTER;
7189 read_unlock(&chan_list_lock);
7191 return exact ? lm1 : lm2;
7194 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7195 * from an existing channel in the list or from the beginning of the
7196 * global list (by passing NULL as first parameter).
/* Returns the matching channel with a reference held
 * (l2cap_chan_hold), or NULL when the list is exhausted; the caller
 * must l2cap_chan_put() the result.
 */
7198 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7199 bdaddr_t *src, u8 link_type)
7201 read_lock(&chan_list_lock);
7203 if (c)
7204 c = list_next_entry(c, global_l);
7205 else
7206 c = list_entry(chan_list.next, typeof(*c), global_l);
7208 list_for_each_entry_from(c, &chan_list, global_l) {
7209 if (c->chan_type != L2CAP_CHAN_FIXED)
7210 continue;
7211 if (c->state != BT_LISTEN)
7212 continue;
/* Match on exact source address or a wildcard listener */
7213 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7214 continue;
/* Listener address family must match the link type */
7215 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7216 continue;
7217 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7218 continue;
7220 l2cap_chan_hold(c);
7221 read_unlock(&chan_list_lock);
7222 return c;
7225 read_unlock(&chan_list_lock);
7227 return NULL;
/* HCI callback: a baseband connection attempt finished.  On failure
 * tear down the l2cap_conn; on success create/refresh the l2cap_conn,
 * notify all matching fixed-channel listeners (unless the peer is
 * blacklisted) and run the connection-ready processing.
 */
7230 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7232 struct hci_dev *hdev = hcon->hdev;
7233 struct l2cap_conn *conn;
7234 struct l2cap_chan *pchan;
7235 u8 dst_type;
7237 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7239 if (status) {
7240 l2cap_conn_del(hcon, bt_to_errno(status));
7241 return;
7244 conn = l2cap_conn_add(hcon);
7245 if (!conn)
7246 return;
7248 dst_type = bdaddr_type(hcon, hcon->dst_type);
7250 /* If device is blocked, do not create channels for it */
7251 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7252 return;
7254 /* Find fixed channels and notify them of the new connection. We
7255 * use multiple individual lookups, continuing each time where
7256 * we left off, because the list lock would prevent calling the
7257 * potentially sleeping l2cap_chan_lock() function.
7259 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
7260 while (pchan) {
7261 struct l2cap_chan *chan, *next;
7263 /* Client fixed channels should override server ones */
7264 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7265 goto next;
7267 l2cap_chan_lock(pchan);
7268 chan = pchan->ops->new_connection(pchan);
7269 if (chan) {
7270 bacpy(&chan->src, &hcon->src);
7271 bacpy(&chan->dst, &hcon->dst);
7272 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7273 chan->dst_type = dst_type;
7275 __l2cap_chan_add(conn, chan);
7278 l2cap_chan_unlock(pchan);
7279 next:
/* Fetch the next listener before dropping the reference that
 * l2cap_global_fixed_chan() took on the current one
 */
7280 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7281 hcon->type);
7282 l2cap_chan_put(pchan);
7283 pchan = next;
7286 l2cap_conn_ready(conn);
7289 int l2cap_disconn_ind(struct hci_conn *hcon)
7291 struct l2cap_conn *conn = hcon->l2cap_data;
7293 BT_DBG("hcon %p", hcon);
7295 if (!conn)
7296 return HCI_ERROR_REMOTE_USER_TERM;
7297 return conn->disc_reason;
7300 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7302 BT_DBG("hcon %p reason %d", hcon, reason);
7304 l2cap_conn_del(hcon, bt_to_errno(reason));
7307 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7309 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7310 return;
7312 if (encrypt == 0x00) {
7313 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7314 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7315 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7316 chan->sec_level == BT_SECURITY_FIPS)
7317 l2cap_chan_close(chan, ECONNREFUSED);
7318 } else {
7319 if (chan->sec_level == BT_SECURITY_MEDIUM)
7320 __clear_chan_timer(chan);
/* HCI callback on completion of a security (authentication/encryption)
 * procedure.  Walks all channels on the connection and advances each
 * one's connect/config state machine according to the new security
 * status.  A2MP channels manage their own security and are skipped.
 * Always returns 0.
 */
7324 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7326 struct l2cap_conn *conn = hcon->l2cap_data;
7327 struct l2cap_chan *chan;
7329 if (!conn)
7330 return 0;
7332 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7334 mutex_lock(&conn->chan_lock);
7336 list_for_each_entry(chan, &conn->chan_l, list) {
7337 l2cap_chan_lock(chan);
7339 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7340 state_to_string(chan->state));
7342 if (chan->scid == L2CAP_CID_A2MP) {
7343 l2cap_chan_unlock(chan);
7344 continue;
/* Security procedure succeeded with encryption: record the
 * level actually achieved on the link
 */
7347 if (!status && encrypt)
7348 chan->sec_level = hcon->sec_level;
7350 if (!__l2cap_no_conn_pending(chan)) {
7351 l2cap_chan_unlock(chan);
7352 continue;
7355 if (!status && (chan->state == BT_CONNECTED ||
7356 chan->state == BT_CONFIG)) {
7357 chan->ops->resume(chan);
7358 l2cap_check_encryption(chan, encrypt);
7359 l2cap_chan_unlock(chan);
7360 continue;
7363 if (chan->state == BT_CONNECT) {
7364 if (!status)
7365 l2cap_start_connection(chan);
7366 else
7367 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7368 } else if (chan->state == BT_CONNECT2 &&
7369 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
/* Incoming connection was waiting on security: answer the
 * pending Connection Request now
 */
7370 struct l2cap_conn_rsp rsp;
7371 __u16 res, stat;
7373 if (!status) {
7374 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7375 res = L2CAP_CR_PEND;
7376 stat = L2CAP_CS_AUTHOR_PEND;
7377 chan->ops->defer(chan);
7378 } else {
7379 l2cap_state_change(chan, BT_CONFIG);
7380 res = L2CAP_CR_SUCCESS;
7381 stat = L2CAP_CS_NO_INFO;
7383 } else {
7384 l2cap_state_change(chan, BT_DISCONN);
7385 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7386 res = L2CAP_CR_SEC_BLOCK;
7387 stat = L2CAP_CS_NO_INFO;
7390 rsp.scid = cpu_to_le16(chan->dcid);
7391 rsp.dcid = cpu_to_le16(chan->scid);
7392 rsp.result = cpu_to_le16(res);
7393 rsp.status = cpu_to_le16(stat);
7394 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7395 sizeof(rsp), &rsp);
7397 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7398 res == L2CAP_CR_SUCCESS) {
7399 char buf[128];
7400 set_bit(CONF_REQ_SENT, &chan->conf_state);
7401 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7402 L2CAP_CONF_REQ,
7403 l2cap_build_conf_req(chan, buf),
7404 buf);
7405 chan->num_conf_req++;
7409 l2cap_chan_unlock(chan);
7412 mutex_unlock(&conn->chan_lock);
7414 return 0;
/* HCI callback: one ACL data fragment arrived.  Reassembles fragments
 * into complete L2CAP frames using conn->rx_skb/rx_len and passes each
 * finished frame to l2cap_recv_frame().  Malformed fragment sequences
 * mark the connection unreliable and drop the data.  Ownership of skb
 * is taken on every path; always returns 0.
 */
7417 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7419 struct l2cap_conn *conn = hcon->l2cap_data;
7420 struct l2cap_hdr *hdr;
7421 int len;
7423 /* For AMP controller do not create l2cap conn */
7424 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7425 goto drop;
7427 if (!conn)
7428 conn = l2cap_conn_add(hcon);
7430 if (!conn)
7431 goto drop;
7433 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7435 switch (flags) {
7436 case ACL_START:
7437 case ACL_START_NO_FLUSH:
7438 case ACL_COMPLETE:
/* New start frame while reassembly pending: discard the old one */
7439 if (conn->rx_len) {
7440 BT_ERR("Unexpected start frame (len %d)", skb->len);
7441 kfree_skb(conn->rx_skb);
7442 conn->rx_skb = NULL;
7443 conn->rx_len = 0;
7444 l2cap_conn_unreliable(conn, ECOMM);
7447 /* Start fragment always begin with Basic L2CAP header */
7448 if (skb->len < L2CAP_HDR_SIZE) {
7449 BT_ERR("Frame is too short (len %d)", skb->len);
7450 l2cap_conn_unreliable(conn, ECOMM);
7451 goto drop;
7454 hdr = (struct l2cap_hdr *) skb->data;
7455 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7457 if (len == skb->len) {
7458 /* Complete frame received */
7459 l2cap_recv_frame(conn, skb);
7460 return 0;
7463 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7465 if (skb->len > len) {
7466 BT_ERR("Frame is too long (len %d, expected len %d)",
7467 skb->len, len);
7468 l2cap_conn_unreliable(conn, ECOMM);
7469 goto drop;
7472 /* Allocate skb for the complete frame (with header) */
7473 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7474 if (!conn->rx_skb)
7475 goto drop;
7477 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7478 skb->len);
7479 conn->rx_len = len - skb->len;
7480 break;
7482 case ACL_CONT:
7483 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7485 if (!conn->rx_len) {
7486 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7487 l2cap_conn_unreliable(conn, ECOMM);
7488 goto drop;
7491 if (skb->len > conn->rx_len) {
7492 BT_ERR("Fragment is too long (len %d, expected %d)",
7493 skb->len, conn->rx_len);
7494 kfree_skb(conn->rx_skb);
7495 conn->rx_skb = NULL;
7496 conn->rx_len = 0;
7497 l2cap_conn_unreliable(conn, ECOMM);
7498 goto drop;
7501 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7502 skb->len);
7503 conn->rx_len -= skb->len;
7505 if (!conn->rx_len) {
7506 /* Complete frame received. l2cap_recv_frame
7507 * takes ownership of the skb so set the global
7508 * rx_skb pointer to NULL first.
7510 struct sk_buff *rx_skb = conn->rx_skb;
7511 conn->rx_skb = NULL;
7512 l2cap_recv_frame(conn, rx_skb);
7514 break;
7517 drop:
7518 kfree_skb(skb);
7519 return 0;
7522 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7524 struct l2cap_chan *c;
7526 read_lock(&chan_list_lock);
7528 list_for_each_entry(c, &chan_list, global_l) {
7529 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7530 &c->src, &c->dst,
7531 c->state, __le16_to_cpu(c->psm),
7532 c->scid, c->dcid, c->imtu, c->omtu,
7533 c->sec_level, c->mode);
7536 read_unlock(&chan_list_lock);
7538 return 0;
/* Open hook for the debugfs "l2cap" file: bind the seq_file show op */
7541 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7543 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the read-only debugfs channel dump */
7546 static const struct file_operations l2cap_debugfs_fops = {
7547 .open = l2cap_debugfs_open,
7548 .read = seq_read,
7549 .llseek = seq_lseek,
7550 .release = single_release,
/* Dentry of the debugfs file, removed again in l2cap_exit() */
7553 static struct dentry *l2cap_debugfs;
7555 int __init l2cap_init(void)
7557 int err;
7559 err = l2cap_init_sockets();
7560 if (err < 0)
7561 return err;
7563 if (IS_ERR_OR_NULL(bt_debugfs))
7564 return 0;
7566 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7567 NULL, &l2cap_debugfs_fops);
7569 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7570 &le_max_credits);
7571 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7572 &le_default_mps);
7574 return 0;
/* L2CAP layer teardown: remove the debugfs entry, unregister sockets */
7577 void l2cap_exit(void)
7579 debugfs_remove(l2cap_debugfs);
7580 l2cap_cleanup_sockets();
/* Allow toggling ERTM support at module load time or via sysfs */
7583 module_param(disable_ertm, bool, 0644);
7584 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");