rtc: rtc-ab-b5ze-s3: add sub-minute alarm support
[linux/fpc-iii.git] / net / bluetooth / l2cap_core.c
blob6ba33f9631e8e5830374ab4e51720c493969c67c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
44 #define LE_FLOWCTL_MAX_CREDITS 65535
46 bool disable_ertm;
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 void *data);
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event);
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
68 if (link_type == LE_LINK) {
69 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
71 else
72 return BDADDR_LE_RANDOM;
75 return BDADDR_BREDR;
/* BDADDR_* type of the local (source) address of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
/* BDADDR_* type of the remote (destination) address of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
88 /* ---- L2CAP channels ---- */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 u16 cid)
93 struct l2cap_chan *c;
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
99 return NULL;
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
111 return NULL;
/* Find channel with given SCID.
 * Returns a locked channel; the caller is responsible for
 * l2cap_chan_unlock() when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock protects the lookup; the channel lock is taken
	 * before chan_lock is dropped so the channel cannot be torn
	 * down between lookup and lock.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
/* Find channel with given DCID.
 * Returns a locked channel; the caller is responsible for
 * l2cap_chan_unlock() when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* Same locking scheme as l2cap_get_chan_by_scid(). */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
147 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 u8 ident)
150 struct l2cap_chan *c;
152 list_for_each_entry(c, &conn->chan_l, list) {
153 if (c->ident == ident)
154 return c;
156 return NULL;
/* Find channel by signalling command identifier.
 * Returns a locked channel; the caller must l2cap_chan_unlock() it.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
173 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
175 struct l2cap_chan *c;
177 list_for_each_entry(c, &chan_list, global_l) {
178 if (c->sport == psm && !bacmp(&c->src, src))
179 return c;
181 return NULL;
/* Bind @chan to PSM @psm on local address @src.  When @psm is zero a
 * free dynamic PSM in 0x1001-0x10ff (odd values only, per the L2CAP
 * spec's valid-PSM encoding) is allocated instead.
 * Returns 0 on success, -EADDRINUSE if the PSM is taken, or -EINVAL
 * if no dynamic PSM is free.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Turn @chan into a fixed channel with source CID @scid.
 * Always succeeds; returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
/* Allocate the first unused dynamic source CID on @conn.  The dynamic
 * range end differs between LE and BR/EDR links.  Returns 0 (an
 * invalid CID) when the range is exhausted.
 * Caller must hold conn->chan_lock so the scan stays consistent.
 */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
/* Move @chan to @state and notify the channel ops with no error. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
/* Move @chan to @state and report @err through the state_change op. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
/* Report @err on @chan without changing its state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (the monitor timer supersedes retransmission while
 * waiting for the peer) or no retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first — only one of the two runs at a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
289 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
290 u16 seq)
292 struct sk_buff *skb;
294 skb_queue_walk(head, skb) {
295 if (bt_cb(skb)->control.txseq == seq)
296 return skb;
299 return NULL;
302 /* ---- L2CAP sequence number lists ---- */
304 /* For ERTM, ordered lists of sequence numbers must be tracked for
305 * SREJ requests that are received and for frames that are to be
306 * retransmitted. These seq_list functions implement a singly-linked
307 * list in an array, where membership in the list can also be checked
308 * in constant time. Items can also be added to the tail of the list
309 * and removed from the head in constant time, without further memory
310 * allocs or frees.
313 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
315 size_t alloc_size, i;
317 /* Allocated size is a power of 2 to map sequence numbers
318 * (which may be up to 14 bits) in to a smaller array that is
319 * sized for the negotiated ERTM transmit windows.
321 alloc_size = roundup_pow_of_two(size);
323 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
324 if (!seq_list->list)
325 return -ENOMEM;
327 seq_list->mask = alloc_size - 1;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
330 for (i = 0; i < alloc_size; i++)
331 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
333 return 0;
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
/* Constant-time check for list membership: a slot holds
 * L2CAP_SEQ_LIST_CLEAR only while its sequence number is not queued.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
/* Remove and return the sequence number at the head of the list.
 * Caller must ensure the list is non-empty.  When the popped entry was
 * the tail (its slot held L2CAP_SEQ_LIST_TAIL) the list becomes empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the slot. */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
/* Empty the list, resetting every slot.  O(capacity), so skipped when
 * the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
/* Append @seq to the tail of the list in constant time.  Duplicate
 * appends are ignored (membership is tracked per slot).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
/* Channel timer expiry: close the channel with a reason derived from
 * the state it timed out in.  Runs from the workqueue; takes
 * conn->chan_lock before the channel lock (the locking order used
 * throughout this file).
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference held by the pending timer work. */
	l2cap_chan_put(chan);
}
/* Allocate and register a new channel in BT_OPEN state with a single
 * reference held by the caller.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unregister the channel from the global list
 * and free it.  Invoked when the last reference is dropped.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
/* Take a reference on channel @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
/* Drop a reference on channel @c; frees it via l2cap_chan_destroy()
 * when the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel's negotiable parameters (FCS, ERTM window/timeouts,
 * security level, flush timeout) to the spec defaults and clear any
 * configuration state from a previous connection attempt.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialize LE credit-based flow control state: no SDU reassembly in
 * progress, no TX credits until granted by the peer, and an MPS capped
 * by our receive MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, take the channel (and possibly hci_conn) references, and link
 * the channel into the connection's channel list.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
/* Detach @chan from its connection and tear down per-mode state
 * (queues, timers, seq lists).  Drops the references taken in
 * __l2cap_chan_add().  @err is reported to the channel's teardown op.
 * Caller must hold conn->chan_lock and the channel lock.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;

		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Mode-specific state is only initialized once configuration
	 * completed, so skip the cleanup below otherwise.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Workqueue handler: after the controller resolves/updates the remote
 * identity address, propagate the new destination address to every
 * channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
/* Reject a pending LE connection request on @chan: move the channel
 * to BT_DISCONN and send an LE Credit Based Connection Response with
 * an authorization or bad-PSM result.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	/* Deferred setup implies the user declined authorization. */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
/* Reject a pending BR/EDR connection request on @chan: move the
 * channel to BT_DISCONN and send a Connection Response with a
 * security-block or bad-PSM result.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
/* Close @chan with @reason, choosing the shutdown procedure by state:
 * established conn-oriented channels get a Disconnect Request with a
 * timer; half-open incoming channels (BT_CONNECT2) are rejected first;
 * everything else is torn down directly.
 * Caller must hold the channel lock (and conn->chan_lock when the
 * channel is attached to a connection).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
/* Derive the HCI authentication requirement for @chan from its type
 * and security level.  SDP-bound channels are downgraded to
 * BT_SECURITY_SDP so discovery never forces pairing; as a side effect
 * this function may rewrite chan->sec_level.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
/* Service level security: request the security level the channel
 * needs.  LE links go through SMP; BR/EDR links through HCI
 * authentication with a requirement derived from the channel type.
 * Returns the underlying security call's result.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
/* Allocate the next signalling command identifier for @conn.
 *
 * Get next available identificator.
 *    1 - 128 are used by kernel.
 *    129 - 199 are reserved.
 *    200 - 254 are used by utilities like l2ping, etc.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	mutex_lock(&conn->ident_lock);

	/* Wrap back to 1; identifier 0 is illegal per the spec. */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
/* Build and transmit a signalling command on @conn.  Allocation
 * failure is silently dropped (signalling retransmission handles
 * loss).  Commands are sent at maximum priority.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
866 static bool __chan_is_moving(struct l2cap_chan *chan)
868 return chan->move_state != L2CAP_MOVE_STABLE &&
869 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit @skb on @chan, routing to the high-speed (AMP) logical
 * link when one is active, otherwise over the ACL link with flush
 * flags chosen from link type and channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* AMP link exists but logical channel is gone;
			 * the frame cannot be delivered.
			 */
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
904 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
906 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
907 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
909 if (enh & L2CAP_CTRL_FRAME_TYPE) {
910 /* S-Frame */
911 control->sframe = 1;
912 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
913 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
915 control->sar = 0;
916 control->txseq = 0;
917 } else {
918 /* I-Frame */
919 control->sframe = 0;
920 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
921 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
923 control->poll = 0;
924 control->super = 0;
928 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
930 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
931 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
933 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
934 /* S-Frame */
935 control->sframe = 1;
936 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
937 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
939 control->sar = 0;
940 control->txseq = 0;
941 } else {
942 /* I-Frame */
943 control->sframe = 0;
944 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
945 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
947 control->poll = 0;
948 control->super = 0;
/* Parse and strip the control field at the front of @skb into the skb
 * control block, choosing the extended (32-bit) or enhanced (16-bit)
 * layout based on the channel's EXT_CTRL flag.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
966 static u32 __pack_extended_control(struct l2cap_ctrl *control)
968 u32 packed;
970 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
971 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
973 if (control->sframe) {
974 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
975 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
976 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
977 } else {
978 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
979 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
982 return packed;
985 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
987 u16 packed;
989 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
990 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
992 if (control->sframe) {
993 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
994 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
995 packed |= L2CAP_CTRL_FRAME_TYPE;
996 } else {
997 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
998 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1001 return packed;
/* Write @control into @skb's control-field slot (just after the basic
 * L2CAP header), using the layout selected by the EXT_CTRL flag.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1017 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1019 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1020 return L2CAP_EXT_HDR_SIZE;
1021 else
1022 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU carrying the already-packed @control field,
 * including FCS when the channel uses CRC16.  Returns the skb or an
 * ERR_PTR on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header plus control field written so far. */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
/* Send the supervisory frame described by @control, updating the
 * RNR-sent and F-bit bookkeeping.  Silently dropped while an AMP
 * channel move is in progress or when @control is not an S-frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* Sending this frame acknowledges up to reqseq, so the
		 * delayed-ack timer is no longer needed.
		 */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1099 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1101 struct l2cap_ctrl control;
1103 BT_DBG("chan %p, poll %d", chan, poll);
1105 memset(&control, 0, sizeof(control));
1106 control.sframe = 1;
1107 control.poll = poll;
1109 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1110 control.super = L2CAP_SUPER_RNR;
1111 else
1112 control.super = L2CAP_SUPER_RR;
1114 control.reqseq = chan->buffer_seq;
1115 l2cap_send_sframe(chan, &control);
/* True when no connection request is outstanding on @chan.  Only
 * connection-oriented channels track CONF_CONNECT_PEND.
 */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return true;

	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
/* Whether @chan may be created on / moved to an AMP controller: both
 * sides must advertise the A2MP fixed channel, at least one non-BR/EDR
 * AMP controller must be up, and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
/* Check EFS parameters.
 * NOTE(review): currently a stub that accepts everything; real
 * validation of extended flow spec parameters is not implemented here.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	return true;
}
/* Send an L2CAP Connection Request for @chan, recording the command
 * identifier and marking the connect as pending until the response.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
/* Send an AMP Create Channel Request for @chan on controller @amp_id. */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
/* Prepare an ERTM channel for an AMP move: stop all timers, mark
 * already-retransmitted frames, reset SREJ/retransmit tracking and
 * park the state machines in their move states.  No-op for non-ERTM
 * channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames at the head that were already sent get their retry
	 * count reset to 1; the walk stops at the first unsent frame.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
/* Finish an AMP channel move: return move state to STABLE and, for
 * ERTM channels, resynchronize with the peer — the initiator polls
 * and waits for the F-bit, the responder waits for the P-bit.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;

	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
/* Transition @chan to BT_CONNECTED and notify its owner.  An LE
 * flow-control channel with no TX credits starts out suspended.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
/* Send an LE credit-based connection request for @chan. Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request is only ever sent once per
 * channel.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	/* Remember the identifier so the response can be matched. */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1279 static void l2cap_le_start(struct l2cap_chan *chan)
1281 struct l2cap_conn *conn = chan->conn;
1283 if (!smp_conn_security(conn->hcon, chan->sec_level))
1284 return;
1286 if (!chan->psm) {
1287 l2cap_chan_ready(chan);
1288 return;
1291 if (chan->state == BT_CONNECT)
1292 l2cap_le_connect(chan);
1295 static void l2cap_start_connection(struct l2cap_chan *chan)
1297 if (__amp_capable(chan)) {
1298 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1299 a2mp_discover_amp(chan);
1300 } else if (chan->conn->hcon->type == LE_LINK) {
1301 l2cap_le_start(chan);
1302 } else {
1303 l2cap_send_conn_req(chan);
1307 static void l2cap_request_info(struct l2cap_conn *conn)
1309 struct l2cap_info_req req;
1311 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1312 return;
1314 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1316 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1317 conn->info_ident = l2cap_get_ident(conn);
1319 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1321 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1322 sizeof(req), &req);
1325 static void l2cap_do_start(struct l2cap_chan *chan)
1327 struct l2cap_conn *conn = chan->conn;
1329 if (conn->hcon->type == LE_LINK) {
1330 l2cap_le_start(chan);
1331 return;
1334 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1335 l2cap_request_info(conn);
1336 return;
1339 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1340 return;
1342 if (l2cap_chan_check_security(chan, true) &&
1343 __l2cap_no_conn_pending(chan))
1344 l2cap_start_connection(chan);
1347 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1349 u32 local_feat_mask = l2cap_feat_mask;
1350 if (!disable_ertm)
1351 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1353 switch (mode) {
1354 case L2CAP_MODE_ERTM:
1355 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1356 case L2CAP_MODE_STREAMING:
1357 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1358 default:
1359 return 0x00;
/* Start disconnecting @chan with error @err: stop ERTM timers, send a
 * Disconnection Request (A2MP channels just change state), and move the
 * channel to BT_DISCONN.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* The A2MP fixed channel has no disconnect request on the wire. */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
/* ---- L2CAP connections ---- */

/* Walk every channel on @conn and push its setup forward: outgoing
 * channels (BT_CONNECT) get a connection request, incoming channels
 * (BT_CONNECT2) get a connection response followed by the first
 * configure request. Runs after the feature-mask exchange finishes.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless/fixed channels need no setup handshake. */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close if the required mode is unsupported and the
			 * channel may not fall back to another mode.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept/reject first. */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured channel moves
			 * on to configuration now.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
/* LE link came up: trigger security for outgoing pairing and, as slave,
 * request a connection parameter update if the current interval is out
 * of the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
/* The underlying HCI link is up: start the feature exchange on ACL
 * links, start or ready each existing channel, then let the pending-rx
 * worker process frames queued while the link was being set up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* A2MP channels have their own readiness handling. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1542 /* Notify sockets that we cannot guaranty reliability anymore */
1543 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1545 struct l2cap_chan *chan;
1547 BT_DBG("conn %p", conn);
1549 mutex_lock(&conn->chan_lock);
1551 list_for_each_entry(chan, &conn->chan_l, list) {
1552 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1553 l2cap_chan_set_err(chan, err);
1556 mutex_unlock(&conn->chan_lock);
1559 static void l2cap_info_timeout(struct work_struct *work)
1561 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1562 info_timer.work);
1564 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1565 conn->info_ident = 0;
1567 l2cap_conn_start(conn);
/* l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
/* Attach @user to @conn and run its ->probe() callback. Returns 0 on
 * success, -EINVAL if the user is already registered (non-NULL list
 * pointers), -ENODEV once the connection has been torn down, or the
 * ->probe() error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered. */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
/* Detach @user from @conn, if it is registered, and run its ->remove()
 * callback. The list pointers are NULLed to mark the user as
 * unregistered (see l2cap_register_user()).
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	/* NULL list pointers mean the user was never registered (or was
	 * already removed) - nothing to do.
	 */
	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1640 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1642 struct l2cap_user *user;
1644 while (!list_empty(&conn->users)) {
1645 user = list_first_entry(&conn->users, struct l2cap_user, list);
1646 list_del(&user->list);
1647 user->list.next = NULL;
1648 user->list.prev = NULL;
1649 user->remove(conn, user);
/* Tear down the L2CAP layer of @hcon: cancel pending work, unregister
 * users, close every channel with @err, drop the HCI channel and
 * release the connection reference. Relies on the parent hci_conn/
 * hci_dev locking (see the comment in l2cap_register_user()).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold an extra reference so the channel survives until
		 * ->close() has run, even though l2cap_chan_del() unlinks it.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1709 static void l2cap_conn_free(struct kref *ref)
1711 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1713 hci_conn_put(conn->hcon);
1714 kfree(conn);
1717 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1719 kref_get(&conn->ref);
1720 return conn;
1722 EXPORT_SYMBOL(l2cap_conn_get);
1724 void l2cap_conn_put(struct l2cap_conn *conn)
1726 kref_put(&conn->ref, l2cap_conn_free);
1728 EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 * The returned channel has been l2cap_chan_hold()-ed; the caller is
 * responsible for dropping that reference.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Source address type must match the link type. */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1784 static void l2cap_monitor_timeout(struct work_struct *work)
1786 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1787 monitor_timer.work);
1789 BT_DBG("chan %p", chan);
1791 l2cap_chan_lock(chan);
1793 if (!chan->conn) {
1794 l2cap_chan_unlock(chan);
1795 l2cap_chan_put(chan);
1796 return;
1799 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1801 l2cap_chan_unlock(chan);
1802 l2cap_chan_put(chan);
1805 static void l2cap_retrans_timeout(struct work_struct *work)
1807 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1808 retrans_timer.work);
1810 BT_DBG("chan %p", chan);
1812 l2cap_chan_lock(chan);
1814 if (!chan->conn) {
1815 l2cap_chan_unlock(chan);
1816 l2cap_chan_put(chan);
1817 return;
1820 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1821 l2cap_chan_unlock(chan);
1822 l2cap_chan_put(chan);
/* Transmit all PDUs in @skbs immediately in streaming mode: assign
 * sequence numbers, pack the control field, append the FCS if enabled
 * and send. No retransmission state is kept. Skipped entirely while a
 * channel move is in progress.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode carries no acknowledgments. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
/* Transmit queued ERTM I-frames starting at tx_send_head while the
 * remote TX window has room. Returns the number of frames sent,
 * -ENOTCONN if the channel is not connected, or 0 when sending is
 * currently blocked (remote busy, channel move, or non-XMIT TX state).
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame piggy-backs an acknowledgment of everything
		 * received so far.
		 */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		 * read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original skb stays queued for
		 * possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
/* Retransmit every frame queued on retrans_list. Each frame is re-sent
 * with an updated reqseq and F-bit and a recomputed FCS. A frame whose
 * retry count exceeds max_tx triggers a disconnect instead.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgment and F-bit for this attempt. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2015 static void l2cap_retransmit(struct l2cap_chan *chan,
2016 struct l2cap_ctrl *control)
2018 BT_DBG("chan %p, control %p", chan, control);
2020 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2021 l2cap_ertm_resend(chan);
/* Rebuild retrans_list with every unacked frame from control->reqseq up
 * to tx_send_head and retransmit them (REJ recovery / poll response).
 * A poll request also schedules the F-bit for the next transmission.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip over frames the peer has already acknowledged. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Everything from here up to tx_send_head is unacked. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
/* Send an acknowledgment if one is due. When locally busy an RNR is
 * sent; otherwise pending I-frames (which piggy-back the ack) are sent
 * first, an explicit RR goes out once 3/4 of the ack window is
 * outstanding, and any remaining ack debt re-arms the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local side is busy: tell the peer to stop sending. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the linear area, the remainder into MTU-sized skbs
 * chained on frag_list. Returns the number of bytes copied or a
 * negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with its frags. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless PDU: L2CAP header plus a 2-byte PSM, followed
 * by the user data from @msg. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Only this much payload fits the first HCI fragment; the rest
	 * is chained on by l2cap_skbuff_fromiovec().
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2184 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2185 struct msghdr *msg, size_t len)
2187 struct l2cap_conn *conn = chan->conn;
2188 struct sk_buff *skb;
2189 int err, count;
2190 struct l2cap_hdr *lh;
2192 BT_DBG("chan %p len %zu", chan, len);
2194 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2196 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2197 msg->msg_flags & MSG_DONTWAIT);
2198 if (IS_ERR(skb))
2199 return skb;
2201 /* Create L2CAP header */
2202 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2203 lh->cid = cpu_to_le16(chan->dcid);
2204 lh->len = cpu_to_le16(len);
2206 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2207 if (unlikely(err < 0)) {
2208 kfree_skb(skb);
2209 return ERR_PTR(err);
2211 return skb;
/* Build one ERTM/streaming I-frame: L2CAP header, zeroed control field
 * (filled in at transmit time), optional SDU length for the first
 * segment, payload, with headroom reserved for the FCS. Returns the
 * skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* Only the first segment of an SDU carries its total length. */
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs appended
 * to @seg_queue. The PDU size is bounded by the HCI MTU, the BR/EDR
 * cap, header/FCS overhead and the remote MPS. Returns 0 or a negative
 * errno (the queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* Only the START PDU carries the SDU length. */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
/* Build one LE credit-based flow-control PDU: L2CAP header, optional
 * SDU length (first segment only), then payload from @msg. Returns the
 * skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
/* Segment an SDU for LE credit-based flow control into PDUs on
 * @seg_queue. The first PDU carries the total SDU length; later PDUs
 * reclaim those two bytes for payload. Returns 0 or a negative errno
 * (the queue is purged on failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Only the first PDU carries the SDU length. */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
/* Send @len bytes from @msg on @chan according to the channel mode.
 * Returns the number of bytes queued/sent or a negative errno. The
 * channel lock is dropped and reacquired around skb allocation, hence
 * the BT_CONNECTED rechecks after each allocation step.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while the lock was dropped
		 * during segmentation.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many PDUs as the peer's credits allow. */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send SREJ frames for every missing sequence number from
 * expected_tx_seq up to (but excluding) @txseq, recording each request
 * in srej_list, then advance expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Don't re-request frames already buffered in srej_q. */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2570 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2572 struct l2cap_ctrl control;
2574 BT_DBG("chan %p", chan);
2576 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2577 return;
2579 memset(&control, 0, sizeof(control));
2580 control.sframe = 1;
2581 control.super = L2CAP_SUPER_SREJ;
2582 control.reqseq = chan->srej_list.tail;
2583 l2cap_send_sframe(chan, &control);
2586 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2588 struct l2cap_ctrl control;
2589 u16 initial_head;
2590 u16 seq;
2592 BT_DBG("chan %p, txseq %u", chan, txseq);
2594 memset(&control, 0, sizeof(control));
2595 control.sframe = 1;
2596 control.super = L2CAP_SUPER_SREJ;
2598 /* Capture initial list head to allow only one pass through the list. */
2599 initial_head = chan->srej_list.head;
2601 do {
2602 seq = l2cap_seq_list_pop(&chan->srej_list);
2603 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2604 break;
2606 control.reqseq = seq;
2607 l2cap_send_sframe(chan, &control);
2608 l2cap_seq_list_append(&chan->srej_list, seq);
2609 } while (chan->srej_list.head != initial_head);
2612 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2614 struct sk_buff *acked_skb;
2615 u16 ackseq;
2617 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2619 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2620 return;
2622 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2623 chan->expected_ack_seq, chan->unacked_frames);
2625 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2626 ackseq = __next_seq(chan, ackseq)) {
2628 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2629 if (acked_skb) {
2630 skb_unlink(acked_skb, &chan->tx_q);
2631 kfree_skb(acked_skb);
2632 chan->unacked_frames--;
2636 chan->expected_ack_seq = reqseq;
2638 if (chan->unacked_frames == 0)
2639 __clear_retrans_timer(chan);
2641 BT_DBG("unacked_frames %u", chan->unacked_frames);
2644 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2646 BT_DBG("chan %p", chan);
2648 chan->expected_tx_seq = chan->buffer_seq;
2649 l2cap_seq_list_clear(&chan->srej_list);
2650 skb_queue_purge(&chan->srej_q);
2651 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit-side state machine handler for the XMIT state: new data
 * may be transmitted immediately; poll, busy and retransmit-timeout
 * events transition the channel towards the WAIT_F state.
 */
2654 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2655 struct l2cap_ctrl *control,
2656 struct sk_buff_head *skbs, u8 event)
2658 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2659 event);
2661 switch (event) {
2662 case L2CAP_EV_DATA_REQUEST:
/* Remember where transmission should resume if nothing is pending */
2663 if (chan->tx_send_head == NULL)
2664 chan->tx_send_head = skb_peek(skbs);
2666 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2667 l2cap_ertm_send(chan);
2668 break;
2669 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2670 BT_DBG("Enter LOCAL_BUSY");
2671 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2673 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2674 /* The SREJ_SENT state must be aborted if we are to
2675 * enter the LOCAL_BUSY state.
 */
2677 l2cap_abort_rx_srej_sent(chan);
/* Sends RNR while CONN_LOCAL_BUSY is set */
2680 l2cap_send_ack(chan);
2682 break;
2683 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2684 BT_DBG("Exit LOCAL_BUSY");
2685 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we previously told the peer we were busy (RNR sent), poll it
 * now (P=1) to resynchronize and wait for the final bit in WAIT_F.
 */
2687 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2688 struct l2cap_ctrl local_control;
2690 memset(&local_control, 0, sizeof(local_control));
2691 local_control.sframe = 1;
2692 local_control.super = L2CAP_SUPER_RR;
2693 local_control.poll = 1;
2694 local_control.reqseq = chan->buffer_seq;
2695 l2cap_send_sframe(chan, &local_control);
2697 chan->retry_count = 1;
2698 __set_monitor_timer(chan);
2699 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2701 break;
2702 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2703 l2cap_process_reqseq(chan, control->reqseq);
2704 break;
2705 case L2CAP_EV_EXPLICIT_POLL:
2706 l2cap_send_rr_or_rnr(chan, 1);
2707 chan->retry_count = 1;
2708 __set_monitor_timer(chan);
2709 __clear_ack_timer(chan);
2710 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2711 break;
2712 case L2CAP_EV_RETRANS_TO:
/* Retransmission timeout: poll the peer (P=1) and await F=1 */
2713 l2cap_send_rr_or_rnr(chan, 1);
2714 chan->retry_count = 1;
2715 __set_monitor_timer(chan);
2716 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2717 break;
2718 case L2CAP_EV_RECV_FBIT:
2719 /* Nothing to process */
2720 break;
2721 default:
2722 break;
2726 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2727 struct l2cap_ctrl *control,
2728 struct sk_buff_head *skbs, u8 event)
2730 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2731 event);
2733 switch (event) {
2734 case L2CAP_EV_DATA_REQUEST:
2735 if (chan->tx_send_head == NULL)
2736 chan->tx_send_head = skb_peek(skbs);
2737 /* Queue data, but don't send. */
2738 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2739 break;
2740 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2741 BT_DBG("Enter LOCAL_BUSY");
2742 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2744 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2745 /* The SREJ_SENT state must be aborted if we are to
2746 * enter the LOCAL_BUSY state.
2748 l2cap_abort_rx_srej_sent(chan);
2751 l2cap_send_ack(chan);
2753 break;
2754 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2755 BT_DBG("Exit LOCAL_BUSY");
2756 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2758 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2759 struct l2cap_ctrl local_control;
2760 memset(&local_control, 0, sizeof(local_control));
2761 local_control.sframe = 1;
2762 local_control.super = L2CAP_SUPER_RR;
2763 local_control.poll = 1;
2764 local_control.reqseq = chan->buffer_seq;
2765 l2cap_send_sframe(chan, &local_control);
2767 chan->retry_count = 1;
2768 __set_monitor_timer(chan);
2769 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2771 break;
2772 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2773 l2cap_process_reqseq(chan, control->reqseq);
2775 /* Fall through */
2777 case L2CAP_EV_RECV_FBIT:
2778 if (control && control->final) {
2779 __clear_monitor_timer(chan);
2780 if (chan->unacked_frames > 0)
2781 __set_retrans_timer(chan);
2782 chan->retry_count = 0;
2783 chan->tx_state = L2CAP_TX_STATE_XMIT;
2784 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2786 break;
2787 case L2CAP_EV_EXPLICIT_POLL:
2788 /* Ignore */
2789 break;
2790 case L2CAP_EV_MONITOR_TO:
2791 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2792 l2cap_send_rr_or_rnr(chan, 1);
2793 __set_monitor_timer(chan);
2794 chan->retry_count++;
2795 } else {
2796 l2cap_send_disconn_req(chan, ECONNABORTED);
2798 break;
2799 default:
2800 break;
2804 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2805 struct sk_buff_head *skbs, u8 event)
2807 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2808 chan, control, skbs, event, chan->tx_state);
2810 switch (chan->tx_state) {
2811 case L2CAP_TX_STATE_XMIT:
2812 l2cap_tx_state_xmit(chan, control, skbs, event);
2813 break;
2814 case L2CAP_TX_STATE_WAIT_F:
2815 l2cap_tx_state_wait_f(chan, control, skbs, event);
2816 break;
2817 default:
2818 /* Ignore event */
2819 break;
2823 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2824 struct l2cap_ctrl *control)
2826 BT_DBG("chan %p, control %p", chan, control);
2827 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2830 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2831 struct l2cap_ctrl *control)
2833 BT_DBG("chan %p, control %p", chan, control);
2834 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2837 /* Copy frame to all raw sockets on that connection */
2838 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2840 struct sk_buff *nskb;
2841 struct l2cap_chan *chan;
2843 BT_DBG("conn %p", conn);
2845 mutex_lock(&conn->chan_lock);
2847 list_for_each_entry(chan, &conn->chan_l, list) {
2848 if (chan->chan_type != L2CAP_CHAN_RAW)
2849 continue;
2851 /* Don't send frame to the channel it came from */
2852 if (bt_cb(skb)->chan == chan)
2853 continue;
2855 nskb = skb_clone(skb, GFP_KERNEL);
2856 if (!nskb)
2857 continue;
2858 if (chan->ops->recv(chan, nskb))
2859 kfree_skb(nskb);
2862 mutex_unlock(&conn->chan_lock);
2865 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb (L2CAP header + command header +
 * payload). If the command does not fit in a single MTU-sized buffer,
 * the remainder is attached as continuation fragments on frag_list.
 * Returns NULL on allocation failure or if the MTU cannot even hold
 * the headers.
 */
2866 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2867 u8 ident, u16 dlen, void *data)
2869 struct sk_buff *skb, **frag;
2870 struct l2cap_cmd_hdr *cmd;
2871 struct l2cap_hdr *lh;
2872 int len, count;
2874 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2875 conn, code, ident, dlen);
2877 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2878 return NULL;
/* Total wire length; first fragment is capped at the connection MTU */
2880 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2881 count = min_t(unsigned int, conn->mtu, len);
2883 skb = bt_skb_alloc(count, GFP_KERNEL);
2884 if (!skb)
2885 return NULL;
2887 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2888 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links */
2890 if (conn->hcon->type == LE_LINK)
2891 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2892 else
2893 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2895 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2896 cmd->code = code;
2897 cmd->ident = ident;
2898 cmd->len = cpu_to_le16(dlen);
2900 if (dlen) {
/* Fill the rest of the first fragment with payload */
2901 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2902 memcpy(skb_put(skb, count), data, count);
2903 data += count;
2906 len -= skb->len;
2908 /* Continuation fragments (no L2CAP header) */
2909 frag = &skb_shinfo(skb)->frag_list;
2910 while (len) {
2911 count = min_t(unsigned int, conn->mtu, len);
2913 *frag = bt_skb_alloc(count, GFP_KERNEL);
2914 if (!*frag)
2915 goto fail;
2917 memcpy(skb_put(*frag, count), data, count);
2919 len -= count;
2920 data += count;
2922 frag = &(*frag)->next;
2925 return skb;
2927 fail:
/* kfree_skb also frees any continuation fragments already chained */
2928 kfree_skb(skb);
2929 return NULL;
/* Decode one configuration option at *ptr and advance *ptr past it.
 * Returns the total encoded length (option header + payload). Values
 * of 1/2/4 bytes are returned by value in *val; any other length is
 * returned as a pointer to the raw payload bytes.
 */
2932 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2933 unsigned long *val)
2935 struct l2cap_conf_opt *opt = *ptr;
2936 int len;
/* NOTE(review): opt->len comes from the remote peer and is not checked
 * against the remaining input length here — callers must verify the
 * returned length stays inside the received buffer. TODO confirm all
 * callers bound-check (later upstream hardening added such checks).
 */
2938 len = L2CAP_CONF_OPT_SIZE + opt->len;
2939 *ptr += len;
2941 *type = opt->type;
2942 *olen = opt->len;
2944 switch (opt->len) {
2945 case 1:
2946 *val = *((u8 *) opt->val);
2947 break;
2949 case 2:
2950 *val = get_unaligned_le16(opt->val);
2951 break;
2953 case 4:
2954 *val = get_unaligned_le32(opt->val);
2955 break;
2957 default:
/* Variable-length option: hand back a pointer to the payload */
2958 *val = (unsigned long) opt->val;
2959 break;
2962 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2963 return len;
/* Append one configuration option at *ptr and advance *ptr past it.
 * len of 1/2/4 stores val by value (little-endian); any other len
 * treats val as a pointer and copies len raw bytes.
 */
2966 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2968 struct l2cap_conf_opt *opt = *ptr;
2970 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
/* NOTE(review): this writes L2CAP_CONF_OPT_SIZE + len bytes with no
 * bound on the destination buffer — callers must guarantee enough
 * space remains (this is the overflow class fixed upstream by passing
 * an explicit output size, CVE-2017-1000251). TODO confirm callers.
 */
2972 opt->type = type;
2973 opt->len = len;
2975 switch (len) {
2976 case 1:
2977 *((u8 *) opt->val) = val;
2978 break;
2980 case 2:
2981 put_unaligned_le16(val, opt->val);
2982 break;
2984 case 4:
2985 put_unaligned_le32(val, opt->val);
2986 break;
2988 default:
/* val is interpreted as a source pointer for raw payloads */
2989 memcpy(opt->val, (void *) val, len);
2990 break;
2993 *ptr += L2CAP_CONF_OPT_SIZE + len;
2996 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2998 struct l2cap_conf_efs efs;
3000 switch (chan->mode) {
3001 case L2CAP_MODE_ERTM:
3002 efs.id = chan->local_id;
3003 efs.stype = chan->local_stype;
3004 efs.msdu = cpu_to_le16(chan->local_msdu);
3005 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3006 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3007 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3008 break;
3010 case L2CAP_MODE_STREAMING:
3011 efs.id = 1;
3012 efs.stype = L2CAP_SERV_BESTEFFORT;
3013 efs.msdu = cpu_to_le16(chan->local_msdu);
3014 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3015 efs.acc_lat = 0;
3016 efs.flush_to = 0;
3017 break;
3019 default:
3020 return;
3023 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3024 (unsigned long) &efs);
3027 static void l2cap_ack_timeout(struct work_struct *work)
3029 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3030 ack_timer.work);
3031 u16 frames_to_ack;
3033 BT_DBG("chan %p", chan);
3035 l2cap_chan_lock(chan);
3037 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3038 chan->last_acked_seq);
3040 if (frames_to_ack)
3041 l2cap_send_rr_or_rnr(chan, 0);
3043 l2cap_chan_unlock(chan);
3044 l2cap_chan_put(chan);
/* Reset all ERTM/streaming bookkeeping on a channel. For ERTM mode
 * this additionally initialises the three delayed-work timers and the
 * SREJ/retransmit sequence lists. Returns 0 on success or a negative
 * errno if a sequence-list allocation fails (the first list is freed
 * if the second allocation fails).
 */
3047 int l2cap_ertm_init(struct l2cap_chan *chan)
3049 int err;
3051 chan->next_tx_seq = 0;
3052 chan->expected_tx_seq = 0;
3053 chan->expected_ack_seq = 0;
3054 chan->unacked_frames = 0;
3055 chan->buffer_seq = 0;
3056 chan->frames_sent = 0;
3057 chan->last_acked_seq = 0;
/* Clear any partial SDU reassembly state */
3058 chan->sdu = NULL;
3059 chan->sdu_last_frag = NULL;
3060 chan->sdu_len = 0;
3062 skb_queue_head_init(&chan->tx_q);
/* AMP move state starts out stable on the BR/EDR controller */
3064 chan->local_amp_id = AMP_ID_BREDR;
3065 chan->move_id = AMP_ID_BREDR;
3066 chan->move_state = L2CAP_MOVE_STABLE;
3067 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below */
3069 if (chan->mode != L2CAP_MODE_ERTM)
3070 return 0;
3072 chan->rx_state = L2CAP_RX_STATE_RECV;
3073 chan->tx_state = L2CAP_TX_STATE_XMIT;
3075 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3076 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3077 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3079 skb_queue_head_init(&chan->srej_q);
3081 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3082 if (err < 0)
3083 return err;
3085 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3086 if (err < 0)
/* Undo the first allocation on failure */
3087 l2cap_seq_list_free(&chan->srej_list);
3089 return err;
3092 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3094 switch (mode) {
3095 case L2CAP_MODE_STREAMING:
3096 case L2CAP_MODE_ERTM:
3097 if (l2cap_mode_supported(mode, remote_feat_mask))
3098 return mode;
3099 /* fall through */
3100 default:
3101 return L2CAP_MODE_BASIC;
3105 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3107 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3108 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3111 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3113 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3114 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC retransmission/monitor timeouts: derived from the
 * AMP controller's best-effort flush timeout when the channel lives on
 * an AMP link, otherwise the spec defaults.
 */
3117 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3118 struct l2cap_conf_rfc *rfc)
3120 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
/* amp_be_flush_to is in microseconds here */
3121 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3123 /* Class 1 devices must have ERTM timeouts
3124 * exceeding the Link Supervision Timeout. The
3125 * default Link Supervision Timeout for AMP
3126 * controllers is 10 seconds.
3128 * Class 1 devices use 0xffffffff for their
3129 * best-effort flush timeout, so the clamping logic
3130 * will result in a timeout that meets the above
3131 * requirement. ERTM timeouts are 16-bit values, so
3132 * the maximum timeout is 65.535 seconds.
 */
3135 /* Convert timeout to milliseconds and round */
3136 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3138 /* This is the recommended formula for class 2 devices
3139 * that start ERTM timers when packets are sent to the
3140 * controller.
 */
3142 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field in the RFC option */
3144 if (ertm_to > 0xffff)
3145 ertm_to = 0xffff;
3147 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3148 rfc->monitor_timeout = rfc->retrans_timeout;
3149 } else {
3150 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3151 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3155 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3157 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3158 __l2cap_ews_supported(chan->conn)) {
3159 /* use extended control field */
3160 set_bit(FLAG_EXT_CTRL, &chan->flags);
3161 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3162 } else {
3163 chan->tx_win = min_t(u16, chan->tx_win,
3164 L2CAP_DEFAULT_TX_WINDOW);
3165 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3167 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request for the channel into data,
 * selecting the mode (on the first request) and appending MTU, RFC,
 * EFS, EWS and FCS options as applicable. Returns the number of bytes
 * written.
 *
 * NOTE(review): nothing bounds the option writes against the caller's
 * buffer size — callers pass fixed stack buffers (e.g. u8 buf[128]);
 * later upstream versions pass an explicit data_size. TODO confirm all
 * call sites provide enough room.
 */
3170 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3172 struct l2cap_conf_req *req = data;
3173 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3174 void *ptr = req->data;
3175 u16 size;
3177 BT_DBG("chan %p", chan);
/* Mode selection only happens on the very first configure exchange */
3179 if (chan->num_conf_req || chan->num_conf_rsp)
3180 goto done;
3182 switch (chan->mode) {
3183 case L2CAP_MODE_STREAMING:
3184 case L2CAP_MODE_ERTM:
3185 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3186 break;
3188 if (__l2cap_efs_supported(chan->conn))
3189 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3191 /* fall through */
3192 default:
3193 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3194 break;
3197 done:
3198 if (chan->imtu != L2CAP_DEFAULT_MTU)
3199 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3201 switch (chan->mode) {
3202 case L2CAP_MODE_BASIC:
/* Only advertise an explicit basic-mode RFC when the peer knows
 * about ERTM/streaming at all.
 */
3203 if (disable_ertm)
3204 break;
3206 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3207 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3208 break;
3210 rfc.mode = L2CAP_MODE_BASIC;
3211 rfc.txwin_size = 0;
3212 rfc.max_transmit = 0;
3213 rfc.retrans_timeout = 0;
3214 rfc.monitor_timeout = 0;
3215 rfc.max_pdu_size = 0;
3217 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3218 (unsigned long) &rfc);
3219 break;
3221 case L2CAP_MODE_ERTM:
3222 rfc.mode = L2CAP_MODE_ERTM;
3223 rfc.max_transmit = chan->max_tx;
3225 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size capped so a max-size PDU plus headers/FCS fits the MTU */
3227 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3228 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3229 L2CAP_FCS_SIZE);
3230 rfc.max_pdu_size = cpu_to_le16(size);
3232 l2cap_txwin_setup(chan);
3234 rfc.txwin_size = min_t(u16, chan->tx_win,
3235 L2CAP_DEFAULT_TX_WINDOW);
3237 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3238 (unsigned long) &rfc);
3240 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3241 l2cap_add_opt_efs(&ptr, chan);
/* Extended window sizes travel in a separate EWS option */
3243 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3244 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3245 chan->tx_win);
3247 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3248 if (chan->fcs == L2CAP_FCS_NONE ||
3249 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3250 chan->fcs = L2CAP_FCS_NONE;
3251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3252 chan->fcs);
3254 break;
3256 case L2CAP_MODE_STREAMING:
3257 l2cap_txwin_setup(chan);
3258 rfc.mode = L2CAP_MODE_STREAMING;
3259 rfc.txwin_size = 0;
3260 rfc.max_transmit = 0;
3261 rfc.retrans_timeout = 0;
3262 rfc.monitor_timeout = 0;
3264 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3265 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3266 L2CAP_FCS_SIZE);
3267 rfc.max_pdu_size = cpu_to_le16(size);
3269 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3270 (unsigned long) &rfc);
3272 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3273 l2cap_add_opt_efs(&ptr, chan);
3275 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3276 if (chan->fcs == L2CAP_FCS_NONE ||
3277 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3278 chan->fcs = L2CAP_FCS_NONE;
3279 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3280 chan->fcs);
3282 break;
3285 req->dcid = cpu_to_le16(chan->dcid);
3286 req->flags = cpu_to_le16(0);
3288 return ptr - data;
/* Parse the peer's Configure Request (stashed in chan->conf_req) and
 * build our Configure Response into data. Negotiates MTU, mode (RFC),
 * EFS and extended window; returns the response length, or
 * -ECONNREFUSED when the peer's parameters cannot be accepted at all.
 *
 * NOTE(review): option values come from the remote peer; the response
 * is written into a caller-supplied buffer without an explicit bound
 * (later upstream versions pass a data_size). TODO confirm callers
 * size their buffers for the worst case.
 */
3291 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3293 struct l2cap_conf_rsp *rsp = data;
3294 void *ptr = rsp->data;
3295 void *req = chan->conf_req;
3296 int len = chan->conf_len;
3297 int type, hint, olen;
3298 unsigned long val;
3299 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
/* NOTE(review): efs stays uninitialized unless an EFS option of the
 * exact expected size arrives, yet it is read below whenever
 * remote_efs/FLAG_EFS_ENABLE is set — verify a short EFS option
 * cannot reach those reads.
 */
3300 struct l2cap_conf_efs efs;
3301 u8 remote_efs = 0;
3302 u16 mtu = L2CAP_DEFAULT_MTU;
3303 u16 result = L2CAP_CONF_SUCCESS;
3304 u16 size;
3306 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent */
3308 while (len >= L2CAP_CONF_OPT_SIZE) {
3309 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3311 hint = type & L2CAP_CONF_HINT;
3312 type &= L2CAP_CONF_MASK;
3314 switch (type) {
3315 case L2CAP_CONF_MTU:
3316 mtu = val;
3317 break;
3319 case L2CAP_CONF_FLUSH_TO:
3320 chan->flush_to = val;
3321 break;
3323 case L2CAP_CONF_QOS:
3324 break;
3326 case L2CAP_CONF_RFC:
3327 if (olen == sizeof(rfc))
3328 memcpy(&rfc, (void *) val, olen);
3329 break;
3331 case L2CAP_CONF_FCS:
3332 if (val == L2CAP_FCS_NONE)
3333 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3334 break;
3336 case L2CAP_CONF_EFS:
3337 remote_efs = 1;
3338 if (olen == sizeof(efs))
3339 memcpy(&efs, (void *) val, olen);
3340 break;
3342 case L2CAP_CONF_EWS:
/* Extended window is only valid when A2MP is available locally */
3343 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3344 return -ECONNREFUSED;
3346 set_bit(FLAG_EXT_CTRL, &chan->flags);
3347 set_bit(CONF_EWS_RECV, &chan->conf_state);
3348 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3349 chan->remote_tx_win = val;
3350 break;
3352 default:
3353 if (hint)
3354 break;
/* Unknown non-hint options are echoed back as rejected */
3356 result = L2CAP_CONF_UNKNOWN;
3357 *((u8 *) ptr++) = type;
3358 break;
3362 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3363 goto done;
/* Mode negotiation happens only on the first exchange */
3365 switch (chan->mode) {
3366 case L2CAP_MODE_STREAMING:
3367 case L2CAP_MODE_ERTM:
3368 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3369 chan->mode = l2cap_select_mode(rfc.mode,
3370 chan->conn->feat_mask);
3371 break;
3374 if (remote_efs) {
3375 if (__l2cap_efs_supported(chan->conn))
3376 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3377 else
3378 return -ECONNREFUSED;
/* A state-2 device cannot change mode: refuse a mismatch */
3381 if (chan->mode != rfc.mode)
3382 return -ECONNREFUSED;
3384 break;
3387 done:
3388 if (chan->mode != rfc.mode) {
3389 result = L2CAP_CONF_UNACCEPT;
3390 rfc.mode = chan->mode;
/* Give the peer one chance to retry with our mode */
3392 if (chan->num_conf_rsp == 1)
3393 return -ECONNREFUSED;
3395 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3396 (unsigned long) &rfc);
3399 if (result == L2CAP_CONF_SUCCESS) {
3400 /* Configure output options and let the other side know
3401 * which ones we don't like. */
3403 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3404 result = L2CAP_CONF_UNACCEPT;
3405 else {
3406 chan->omtu = mtu;
3407 set_bit(CONF_MTU_DONE, &chan->conf_state);
3409 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3411 if (remote_efs) {
3412 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3413 efs.stype != L2CAP_SERV_NOTRAFIC &&
3414 efs.stype != chan->local_stype) {
3416 result = L2CAP_CONF_UNACCEPT;
3418 if (chan->num_conf_req >= 1)
3419 return -ECONNREFUSED;
3421 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3422 sizeof(efs),
3423 (unsigned long) &efs);
3424 } else {
3425 /* Send PENDING Conf Rsp */
3426 result = L2CAP_CONF_PENDING;
3427 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3431 switch (rfc.mode) {
3432 case L2CAP_MODE_BASIC:
3433 chan->fcs = L2CAP_FCS_NONE;
3434 set_bit(CONF_MODE_DONE, &chan->conf_state);
3435 break;
3437 case L2CAP_MODE_ERTM:
/* Window came either from the EWS option or the RFC field */
3438 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3439 chan->remote_tx_win = rfc.txwin_size;
3440 else
3441 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3443 chan->remote_max_tx = rfc.max_transmit;
3445 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3446 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3447 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3448 rfc.max_pdu_size = cpu_to_le16(size);
3449 chan->remote_mps = size;
3451 __l2cap_set_ertm_timeouts(chan, &rfc);
3453 set_bit(CONF_MODE_DONE, &chan->conf_state);
3455 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3456 sizeof(rfc), (unsigned long) &rfc);
3458 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3459 chan->remote_id = efs.id;
3460 chan->remote_stype = efs.stype;
3461 chan->remote_msdu = le16_to_cpu(efs.msdu);
3462 chan->remote_flush_to =
3463 le32_to_cpu(efs.flush_to);
3464 chan->remote_acc_lat =
3465 le32_to_cpu(efs.acc_lat);
3466 chan->remote_sdu_itime =
3467 le32_to_cpu(efs.sdu_itime);
3468 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3469 sizeof(efs),
3470 (unsigned long) &efs);
3472 break;
3474 case L2CAP_MODE_STREAMING:
3475 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3476 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3477 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3478 rfc.max_pdu_size = cpu_to_le16(size);
3479 chan->remote_mps = size;
3481 set_bit(CONF_MODE_DONE, &chan->conf_state);
3483 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3484 (unsigned long) &rfc);
3486 break;
3488 default:
3489 result = L2CAP_CONF_UNACCEPT;
3491 memset(&rfc, 0, sizeof(rfc));
3492 rfc.mode = chan->mode;
3495 if (result == L2CAP_CONF_SUCCESS)
3496 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3498 rsp->scid = cpu_to_le16(chan->dcid);
3499 rsp->result = cpu_to_le16(result);
3500 rsp->flags = cpu_to_le16(0);
3502 return ptr - data;
/* Parse the peer's Configure Response and build our follow-up
 * Configure Request into data. Adopts the negotiated MTU/flush/RFC
 * values and echoes the options back; returns the request length, or
 * -ECONNREFUSED when the response forces a mode we cannot accept.
 *
 * NOTE(review): this function writes echoed options into the
 * caller-supplied buffer with no size bound — this is the exact code
 * path hardened upstream for CVE-2017-1000251 by threading a size
 * parameter through; the fix is an interface change and is not applied
 * here. Verify callers' buffer sizes.
 */
3505 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3506 void *data, u16 *result)
3508 struct l2cap_conf_req *req = data;
3509 void *ptr = req->data;
3510 int type, olen;
3511 unsigned long val;
3512 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
/* NOTE(review): efs is read below even when no (or a short) EFS
 * option was received — confirm uninitialized use is unreachable.
 */
3513 struct l2cap_conf_efs efs;
3515 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3517 while (len >= L2CAP_CONF_OPT_SIZE) {
3518 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3520 switch (type) {
3521 case L2CAP_CONF_MTU:
/* Clamp to the minimum MTU but mark the response unacceptable */
3522 if (val < L2CAP_DEFAULT_MIN_MTU) {
3523 *result = L2CAP_CONF_UNACCEPT;
3524 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3525 } else
3526 chan->imtu = val;
3527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3528 break;
3530 case L2CAP_CONF_FLUSH_TO:
3531 chan->flush_to = val;
3532 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3533 2, chan->flush_to);
3534 break;
3536 case L2CAP_CONF_RFC:
3537 if (olen == sizeof(rfc))
3538 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot switch modes mid-negotiation */
3540 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3541 rfc.mode != chan->mode)
3542 return -ECONNREFUSED;
3544 chan->fcs = 0;
3546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3547 sizeof(rfc), (unsigned long) &rfc);
3548 break;
3550 case L2CAP_CONF_EWS:
3551 chan->ack_win = min_t(u16, val, chan->ack_win);
3552 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3553 chan->tx_win);
3554 break;
3556 case L2CAP_CONF_EFS:
3557 if (olen == sizeof(efs))
3558 memcpy(&efs, (void *)val, olen);
3560 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3561 efs.stype != L2CAP_SERV_NOTRAFIC &&
3562 efs.stype != chan->local_stype)
3563 return -ECONNREFUSED;
3565 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3566 (unsigned long) &efs);
3567 break;
3569 case L2CAP_CONF_FCS:
3570 if (*result == L2CAP_CONF_PENDING)
3571 if (val == L2CAP_FCS_NONE)
3572 set_bit(CONF_RECV_NO_FCS,
3573 &chan->conf_state);
3574 break;
/* A basic-mode channel cannot be upgraded by the response */
3578 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3579 return -ECONNREFUSED;
3581 chan->mode = rfc.mode;
3583 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3584 switch (rfc.mode) {
3585 case L2CAP_MODE_ERTM:
3586 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3587 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3588 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3589 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3590 chan->ack_win = min_t(u16, chan->ack_win,
3591 rfc.txwin_size);
3593 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3594 chan->local_msdu = le16_to_cpu(efs.msdu);
3595 chan->local_sdu_itime =
3596 le32_to_cpu(efs.sdu_itime);
3597 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3598 chan->local_flush_to =
3599 le32_to_cpu(efs.flush_to);
3601 break;
3603 case L2CAP_MODE_STREAMING:
3604 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3608 req->dcid = cpu_to_le16(chan->dcid);
3609 req->flags = cpu_to_le16(0);
3611 return ptr - data;
3614 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3615 u16 result, u16 flags)
3617 struct l2cap_conf_rsp *rsp = data;
3618 void *ptr = rsp->data;
3620 BT_DBG("chan %p", chan);
3622 rsp->scid = cpu_to_le16(chan->dcid);
3623 rsp->result = cpu_to_le16(result);
3624 rsp->flags = cpu_to_le16(flags);
3626 return ptr - data;
3629 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3631 struct l2cap_le_conn_rsp rsp;
3632 struct l2cap_conn *conn = chan->conn;
3634 BT_DBG("chan %p", chan);
3636 rsp.dcid = cpu_to_le16(chan->scid);
3637 rsp.mtu = cpu_to_le16(chan->imtu);
3638 rsp.mps = cpu_to_le16(chan->mps);
3639 rsp.credits = cpu_to_le16(chan->rx_credits);
3640 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3642 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3643 &rsp);
3646 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3648 struct l2cap_conn_rsp rsp;
3649 struct l2cap_conn *conn = chan->conn;
3650 u8 buf[128];
3651 u8 rsp_code;
3653 rsp.scid = cpu_to_le16(chan->dcid);
3654 rsp.dcid = cpu_to_le16(chan->scid);
3655 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3656 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3658 if (chan->hs_hcon)
3659 rsp_code = L2CAP_CREATE_CHAN_RSP;
3660 else
3661 rsp_code = L2CAP_CONN_RSP;
3663 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3665 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3667 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3668 return;
3670 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3671 l2cap_build_conf_req(chan, buf), buf);
3672 chan->num_conf_req++;
/* Extract the finally-agreed RFC (and extended window) values from a
 * successful Configure Response and store them on the channel. Only
 * meaningful for ERTM and streaming channels.
 */
3675 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3677 int type, olen;
3678 unsigned long val;
3679 /* Use sane default values in case a misbehaving remote device
3680 * did not send an RFC or extended window size option.
 */
3682 u16 txwin_ext = chan->ack_win;
3683 struct l2cap_conf_rfc rfc = {
3684 .mode = chan->mode,
3685 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3686 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3687 .max_pdu_size = cpu_to_le16(chan->imtu),
3688 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3691 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Basic mode carries no RFC parameters worth extracting */
3693 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3694 return;
3696 while (len >= L2CAP_CONF_OPT_SIZE) {
3697 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3699 switch (type) {
3700 case L2CAP_CONF_RFC:
3701 if (olen == sizeof(rfc))
3702 memcpy(&rfc, (void *)val, olen);
3703 break;
3704 case L2CAP_CONF_EWS:
3705 txwin_ext = val;
3706 break;
3710 switch (rfc.mode) {
3711 case L2CAP_MODE_ERTM:
3712 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3713 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3714 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack window comes from EWS when extended control is in use */
3715 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3716 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3717 else
3718 chan->ack_win = min_t(u16, chan->ack_win,
3719 rfc.txwin_size);
3720 break;
3721 case L2CAP_MODE_STREAMING:
3722 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3726 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3727 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3728 u8 *data)
3730 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3732 if (cmd_len < sizeof(*rej))
3733 return -EPROTO;
3735 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3736 return 0;
3738 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3739 cmd->ident == conn->info_ident) {
3740 cancel_delayed_work(&conn->info_timer);
3742 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3743 conn->info_ident = 0;
3745 l2cap_conn_start(conn);
3748 return 0;
/* Handle an incoming Connection Request.
 *
 * Shared between the classic BR/EDR path (rsp_code == L2CAP_CONN_RSP,
 * amp_id == 0) and the AMP Create Channel path, which passes a
 * different rsp_code/amp_id.  Looks up a listening channel for the
 * requested PSM, checks link security, creates the new channel and
 * always sends a response (success, pending or an error result).
 *
 * Returns the newly created channel (NULL on any failure path) so the
 * AMP caller can attach high-speed link state to it.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	/* The remote's source CID is our destination CID */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Let userspace authorize the connection */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Remote features unknown yet; answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	/* Drop the reference taken by l2cap_global_chan_by_psm() */
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* If we answered "pending, no info" start feature discovery now */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Immediate success: start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
/* Signaling entry point for a BR/EDR Connection Request.
 *
 * Notifies the management interface of the (now definitely used)
 * connection exactly once, then delegates all channel setup to
 * l2cap_connect().  Returns 0 or -EPROTO on a truncated command.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* test_and_set_bit guarantees mgmt sees the device only once */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
/* Handle a Connection Response (or AMP Create Channel Response).
 *
 * Locates the local channel either by the peer-assigned scid (when the
 * peer allocated one) or by the ident of our original request, then
 * advances it to BT_CONFIG on success, marks it pending, or tears it
 * down on any error result.
 *
 * Returns 0, -EPROTO on a truncated command, or -EBADSLT when no
 * matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No scid assigned yet; match by the request ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first config request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal; drop the channel */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3978 static inline void set_default_fcs(struct l2cap_chan *chan)
3980 /* FCS is enabled only in ERTM or streaming mode, if one or both
3981 * sides request it.
3983 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3984 chan->fcs = L2CAP_FCS_NONE;
3985 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3986 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configure Response (EFS path) and mark the local
 * side of configuration as complete for this channel.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* Local config is no longer pending; output side is done */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4005 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4006 u16 scid, u16 dcid)
4008 struct l2cap_cmd_rej_cid rej;
4010 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4011 rej.scid = __cpu_to_le16(scid);
4012 rej.dcid = __cpu_to_le16(dcid);
4014 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request.
 *
 * Accumulates (possibly fragmented) option data into chan->conf_req,
 * and once the final fragment arrives parses the options, sends the
 * Configure Response, and drives the channel toward the ready state
 * (initializing ERTM/streaming if negotiated).
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * channel lock held (the single unlock at the end pairs with it) —
 * confirm against its definition.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Config is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize and go ready */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have not sent our own config request yet: do it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Configuration Response.
 *
 * On success, records the negotiated RFC options; on PENDING, parses
 * the interim options and (for BR/EDR) answers immediately or (for a
 * high-speed channel) starts logical link creation; on UNACCEPT,
 * retries with adjusted options up to L2CAP_CONF_MAX_CONF_RSP times;
 * anything else aborts the channel.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the channel
 * locked — the unlock happens at "done".  Confirm against its
 * definition.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* High-speed channel: defer the response
				 * until the logical link comes up.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fallthrough - too many retries, treat as fatal */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finalize and go ready */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response and tear the channel down.
 *
 * Returns 0, or -EPROTO if the command length is not exact (the spec
 * defines a fixed-size payload here, hence != rather than <).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference so the channel outlives l2cap_chan_del()
	 * until ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle an incoming Disconnection Response: the peer has acknowledged
 * our disconnect request, so finish tearing the channel down (no error
 * is propagated to the channel — err 0).
 *
 * Returns 0, or -EPROTO on a malformed (wrong-size) command.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Keep the channel alive across del until ops->close() runs */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle an incoming Information Request.
 *
 * Answers three request types: the extended feature mask (built from
 * l2cap_feat_mask plus ERTM/streaming/FCS bits when ERTM is enabled
 * and flow/window extensions when A2MP is available), the fixed
 * channel bitmap, and "not supported" for anything else.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
/* Handle an incoming Information Response during feature discovery.
 *
 * After receiving the feature mask we optionally chase it with a fixed
 * channel query; once discovery completes (or fails) pending channels
 * are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Discovery failed; give up and start pending channels */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Remote supports fixed channels; ask which */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
/* Handle an AMP Create Channel Request.
 *
 * For controller id 0 (BR/EDR) this is equivalent to a normal connect.
 * Otherwise the requested AMP controller is validated, the channel is
 * created via l2cap_connect(), and the high-speed HCI connection is
 * attached to it.  On a bad controller id a BAD_AMP response is sent.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			/* AMP link vanished; reject and drop hdev ref */
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used on high-speed links */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4515 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4517 struct l2cap_move_chan_req req;
4518 u8 ident;
4520 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4522 ident = l2cap_get_ident(chan->conn);
4523 chan->ident = ident;
4525 req.icid = cpu_to_le16(chan->scid);
4526 req.dest_amp_id = dest_amp_id;
4528 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4529 &req);
4531 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4534 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4536 struct l2cap_move_chan_rsp rsp;
4538 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4540 rsp.icid = cpu_to_le16(chan->dcid);
4541 rsp.result = cpu_to_le16(result);
4543 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4544 sizeof(rsp), &rsp);
4547 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4549 struct l2cap_move_chan_cfm cfm;
4551 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4553 chan->ident = l2cap_get_ident(chan->conn);
4555 cfm.icid = cpu_to_le16(chan->scid);
4556 cfm.result = cpu_to_le16(result);
4558 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4559 sizeof(cfm), &cfm);
4561 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4564 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4566 struct l2cap_move_chan_cfm cfm;
4568 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4570 cfm.icid = cpu_to_le16(icid);
4571 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4574 sizeof(cfm), &cfm);
4577 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4578 u16 icid)
4580 struct l2cap_move_chan_cfm_rsp rsp;
4582 BT_DBG("icid 0x%4.4x", icid);
4584 rsp.icid = cpu_to_le16(icid);
4585 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4588 static void __release_logical_link(struct l2cap_chan *chan)
4590 chan->hs_hchan = NULL;
4591 chan->hs_hcon = NULL;
4593 /* Placeholder - release the logical link */
/* React to a failed logical link setup.
 *
 * A failure during channel creation kills the channel; a failure
 * during a channel move winds the move state machine down according to
 * the role this side was playing.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot complete the move */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
/* Complete channel creation once the logical link is up: attach the
 * hci_chan, send the deferred EFS Configure Response, and if the
 * inbound configuration already finished, bring the channel ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Answer the config request that was held back for the link */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
/* Advance the channel-move state machine now that the logical link for
 * the move is up; the next step depends on our role and on whether the
 * local side is busy.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
/* Logical link setup completion callback.
 *
 * Call with chan locked.  A non-zero status means the link failed;
 * otherwise the result is routed to either the channel-creation or
 * channel-move completion path depending on the channel state.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
/* Start moving the channel between BR/EDR and an AMP controller as
 * initiator.  From BR/EDR the physical link must come up first; from
 * an AMP controller the move back to BR/EDR (id 0) starts immediately.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		/* Only move off BR/EDR when policy prefers AMP */
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* move_id 0 == back to BR/EDR */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
/* Continue AMP channel creation after the physical link attempt.
 *
 * Outgoing channels either proceed with a Create Channel request on
 * success or fall back to a plain BR/EDR connect.  Incoming channels
 * get their (previously deferred) Create Channel response and, on
 * success, immediately enter configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on high-speed links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4776 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4777 u8 remote_amp_id)
4779 l2cap_move_setup(chan);
4780 chan->move_id = local_amp_id;
4781 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4783 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder: answer the Move Channel Request based on logical
 * link availability.  (The hchan lookup is still a placeholder, so
 * currently this always answers NOT_ALLOWED.)
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
/* Abort an in-progress channel move: as responder send a failure
 * response (BAD_ID for -EINVAL, otherwise NOT_ALLOWED), reset the move
 * state machine, and resume ERTM transmission.
 */
static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
{
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		u8 rsp_result;
		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
		else
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);
	}

	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
}
/* Physical link setup completion callback.
 *
 * Invoke with locked chan.  Routes the result to channel creation, to
 * the role-specific move continuation, or cancels the move on failure.
 *
 * NOTE(review): the early-return path unlocks the channel here while
 * the normal paths leave it locked — presumably the caller unlocks on
 * the normal return; confirm against the callers.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away: nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
/* Handle an incoming Move Channel Request.
 *
 * Validates that the channel is movable (dynamic CID, ERTM/streaming,
 * policy allows it), that the destination AMP controller exists and is
 * up, and resolves move collisions by comparing bd_addrs; then becomes
 * move responder and answers with success/pending or an error result.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No such channel: answer directly with NOT_ALLOWED */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Continue the initiator's move state machine after a success/pending
 * Move Channel Response, advancing toward confirm depending on logical
 * link readiness and local busy state.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Pending responses get the extended move timeout */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
/* Handle a Move Channel Response with a failure result.  On a
 * collision the initiator yields and becomes the responder; any other
 * failure cancels the move.  In every case an UNCONFIRMED confirm is
 * sent so both sides terminate the move transaction.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Look up by signaling ident since the move failed; the icid in
	 * the response may not map to a live channel.
	 */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* Stop the move guard timer; the transaction is ending */
	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Peer initiated too; let it drive the move */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5081 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5082 struct l2cap_cmd_hdr *cmd,
5083 u16 cmd_len, void *data)
5085 struct l2cap_move_chan_rsp *rsp = data;
5086 u16 icid, result;
5088 if (cmd_len != sizeof(*rsp))
5089 return -EPROTO;
5091 icid = le16_to_cpu(rsp->icid);
5092 result = le16_to_cpu(rsp->result);
5094 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5096 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5097 l2cap_move_continue(conn, icid, result);
5098 else
5099 l2cap_move_fail(conn, cmd->ident, icid, result);
5101 return 0;
/* Handle a Move Channel Confirmation from the remote.  A response is
 * always sent (spec requirement), even for an unknown icid.  When the
 * local side was waiting for this confirm, commit or roll back the AMP
 * controller id and finish the move.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move confirmed: adopt the new controller id */
			chan->local_amp_id = chan->move_id;
			/* Back on BR/EDR the AMP logical link is unused */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle a Move Channel Confirmation Response: the final packet of a
 * move transaction.  Commits the new controller id, releases a
 * now-unneeded AMP logical link, and completes the move.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	/* The move transaction is over; stop the guard timer */
	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: the AMP logical link (if any)
		 * is no longer needed.
		 */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle an LE Connection Parameter Update Request.  Only valid when we
 * are the LE master; parameters are validated, a response is sent, and
 * accepted parameters are applied to the link and reported to mgmt.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may act on this request */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Validate against the allowed LE connection parameter ranges */
	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* Respond first, then apply the update */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		/* Let userspace decide whether to persist the parameters */
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
/* Handle an LE Credit Based Connection Response for a connect request
 * we sent.  On success the channel becomes ready; on a security
 * failure we raise the security level and retry via SMP; anything else
 * tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the minimum MTU/MPS allowed for LE CoC */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Match the response to our outstanding request by ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Escalate security one step and retry */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
/* Dispatch one BR/EDR signaling command to its handler.  Note that only
 * some handlers propagate errors (the ones whose failure should trigger
 * a Command Reject in the caller); the rest are intentionally
 * fire-and-forget.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo simply mirrors the payload back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
/* Handle an incoming LE Credit Based Connection Request.  Validates the
 * request, checks for a listening PSM and sufficient security, creates
 * the new channel, and replies with an LE Connect Response (unless
 * setup is deferred to userspace, in which case no response is sent
 * here).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS allowed for LE CoC */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later, on accept */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel's TX budget and flush any frames that were queued while
 * we were out of credits.  A credit count that would overflow the
 * 16-bit budget is a protocol violation and disconnects the channel.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Reject credit grants that would push tx_credits past the
	 * 16-bit maximum.
	 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Drain queued frames while credits last */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	/* Credits left over: let the socket layer resume sending */
	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5558 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5559 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5560 u8 *data)
5562 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5563 struct l2cap_chan *chan;
5565 if (cmd_len < sizeof(*rej))
5566 return -EPROTO;
5568 mutex_lock(&conn->chan_lock);
5570 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5571 if (!chan)
5572 goto done;
5574 l2cap_chan_lock(chan);
5575 l2cap_chan_del(chan, ECONNREFUSED);
5576 l2cap_chan_unlock(chan);
5578 done:
5579 mutex_unlock(&conn->chan_lock);
5580 return 0;
/* Dispatch one LE signaling command to its handler.  Handlers whose
 * failure should generate a Command Reject propagate their error; the
 * others are fire-and-forget.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
/* Process an skb received on the LE signaling channel.  LE carries at
 * most one command per PDU.  Consumes the skb in all paths.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* ident 0 is reserved; a length mismatch means a truncated or
	 * padded PDU.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text looks misleading here — err
		 * comes from command handling, not a link-type check.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
/* Process an skb received on the BR/EDR signaling channel.  Unlike LE,
 * a single PDU may carry several concatenated commands; iterate over
 * them, rejecting any a handler could not process.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (SOCK_RAW) listeners a copy of all signaling traffic */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A command longer than what remains, or ident 0
		 * (reserved), means a corrupted PDU; stop parsing.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message text looks misleading —
			 * err comes from command handling, not a
			 * link-type check.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command */
		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5720 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5722 u16 our_fcs, rcv_fcs;
5723 int hdr_size;
5725 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5726 hdr_size = L2CAP_EXT_HDR_SIZE;
5727 else
5728 hdr_size = L2CAP_ENH_HDR_SIZE;
5730 if (chan->fcs == L2CAP_FCS_CRC16) {
5731 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5732 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5733 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5735 if (our_fcs != rcv_fcs)
5736 return -EBADMSG;
5738 return 0;
/* Answer a poll (P-bit) from the remote: send RNR if we are locally
 * busy, otherwise flush pending I-frames, and fall back to an RR if no
 * frame ended up carrying the required F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		/* Locally busy: report RNR (carries the F-bit) */
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just exited busy: restart retransmission timer if
	 * frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
/* Append @new_frag to @skb's fragment list during SDU reassembly,
 * keeping @last_frag pointing at the list tail for O(1) appends and
 * updating the aggregate length/size accounting on the head skb.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
/* Reassemble a (possibly segmented) SDU from an ERTM I-frame according
 * to its SAR bits.  Takes ownership of @skb: on success it is either
 * consumed by chan->ops->recv() or parked in chan->sdu; on error both
 * @skb and any partial SDU are freed.  Returns 0 on success/progress,
 * negative errno on a SAR violation, oversize SDU, or recv failure.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly already in progress makes this invalid */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must not already hold the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Continuation must leave the SDU still incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final segment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
/* Re-segment queued ERTM frames after an AMP move (the new link may
 * have a different MPS).  Currently a stub that always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5882 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5884 u8 event;
5886 if (chan->mode != L2CAP_MODE_ERTM)
5887 return;
5889 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5890 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue: deliver consecutively-sequenced I-frames
 * to reassembly until a gap (or local busy) stops progress.  When the
 * queue empties, leave SREJ_SENT state and acknowledge.  Returns the
 * first reassembly error, or 0.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		/* Look for the next in-order frame in the hold queue */
		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled: back to normal receive state */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, observing F-bit/P-bit rules and the retry limit.  An SREJ
 * for the next unsent sequence number is invalid and disconnects.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq requests a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll set: our retransmission must carry the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit answers an
			 * SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, subject to the retry limit.  A REJ for the next unsent
 * sequence number is invalid and disconnects.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq rejects a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip the retransmit if this F-bit answers a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Classify an incoming I-frame's txseq relative to the receive window
 * and any outstanding SREJ state.  Returns one of the L2CAP_TXSEQ_*
 * classifications that drive the RX state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq closer to last_acked_seq than expected_tx_seq means an
	 * already-received (duplicate) frame.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
/* ERTM receive state machine, RECV state: process an incoming frame or
 * event.  Takes conditional ownership of @skb — it is freed here unless
 * it was consumed by reassembly or parked on the SREJ queue.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			/* reassemble_sdu takes ownership of skb */
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Don't retransmit during a channel move */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
/* ERTM receive state machine, SREJ_SENT state: SREJ(s) are outstanding
 * and out-of-order frames are being held on srej_q until the gaps are
 * filled.  Takes conditional ownership of @skb — freed here unless it
 * was queued for later reassembly.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The SREJ at the head of the list is answered */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Try delivering any now-contiguous frames */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Poll must be answered with the F-bit; resend
			 * the most recent SREJ to carry it.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6385 static int l2cap_finish_move(struct l2cap_chan *chan)
6387 BT_DBG("chan %p", chan);
6389 chan->rx_state = L2CAP_RX_STATE_RECV;
6391 if (chan->hs_hcon)
6392 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6393 else
6394 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6396 return l2cap_resegment(chan);
/* RX state machine handler for the WAIT_P state (channel move in
 * progress, waiting for a frame with the P-bit set).  Any frame
 * without the P-bit is a protocol error.  Once the poll arrives, the
 * TX side is rewound to the peer's reqseq, the move is finished and a
 * final response is sent; the triggering S-frame is then replayed
 * through the normal RECV handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Re-dispatch the S-frame event with skb already consumed (NULL) */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
/* RX state machine handler for the WAIT_F state (channel move in
 * progress, waiting for a frame with the F-bit set).  On receipt of
 * the final response the TX side is rewound to the peer's reqseq, the
 * channel MTU is switched to the new controller, pending data is
 * resegmented, and the frame is processed by the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the MTU of whichever link now carries this channel */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6475 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6477 /* Make sure reqseq is for a packet that has been sent but not acked */
6478 u16 unacked;
6480 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6481 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6484 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6485 struct sk_buff *skb, u8 event)
6487 int err = 0;
6489 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6490 control, skb, event, chan->rx_state);
6492 if (__valid_reqseq(chan, control->reqseq)) {
6493 switch (chan->rx_state) {
6494 case L2CAP_RX_STATE_RECV:
6495 err = l2cap_rx_state_recv(chan, control, skb, event);
6496 break;
6497 case L2CAP_RX_STATE_SREJ_SENT:
6498 err = l2cap_rx_state_srej_sent(chan, control, skb,
6499 event);
6500 break;
6501 case L2CAP_RX_STATE_WAIT_P:
6502 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6503 break;
6504 case L2CAP_RX_STATE_WAIT_F:
6505 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6506 break;
6507 default:
6508 /* shut it down */
6509 break;
6511 } else {
6512 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6513 control->reqseq, chan->next_tx_seq,
6514 chan->expected_ack_seq);
6515 l2cap_send_disconn_req(chan, ECONNRESET);
6518 return err;
/* Receive path for streaming mode: in-sequence frames are fed to SDU
 * reassembly; any sequence gap aborts the partially-assembled SDU and
 * drops the frame (streaming mode has no retransmission).  Ownership
 * of @skb passes to this function on every path.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* Reassembly takes ownership of skb */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Sequence gap: discard any partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronise on the received txseq regardless of outcome */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
/* Validate and dispatch a received ERTM/streaming-mode frame: check
 * the FCS, enforce the negotiated MPS, validate F/P bits against the
 * TX state, then route I-frames to l2cap_rx()/l2cap_stream_rx() and
 * S-frames (mapped from their "super" field) to l2cap_rx().  Always
 * returns 0; invalid frames are dropped and protocol violations
 * trigger a disconnect request.  Consumes @skb on every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude SDU-length header and FCS trailer from the payload size */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6647 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6649 struct l2cap_conn *conn = chan->conn;
6650 struct l2cap_le_credits pkt;
6651 u16 return_credits;
6653 /* We return more credits to the sender only after the amount of
6654 * credits falls below half of the initial amount.
6656 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6657 return;
6659 return_credits = le_max_credits - chan->rx_credits;
6661 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6663 chan->rx_credits += return_credits;
6665 pkt.cid = cpu_to_le16(chan->scid);
6666 pkt.credits = cpu_to_le16(return_credits);
6668 chan->ident = l2cap_get_ident(conn);
6670 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive path for LE credit-based flow control mode.  Charges one
 * credit per PDU (disconnecting if the peer sent without credit),
 * replenishes the sender's credits when needed, and reassembles SDUs:
 * the first PDU of an SDU carries a 16-bit SDU length, later PDUs are
 * appended to chan->sdu until the full length has arrived.
 *
 * NOTE on ownership: on success skb is either delivered to the ops
 * layer or held in chan->sdu; on a reassembly error both skb and any
 * partial SDU are freed here, and 0 is returned anyway so the caller
 * does not free skb a second time (see comment at the end).  Only the
 * early -ENOBUFS returns leave skb to the caller.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver directly */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		/* Segmented: hold the first fragment and wait for more */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		/* Full SDU assembled: deliver and reset reassembly state */
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
/* Deliver a data frame to the channel identified by @cid, dispatching
 * by channel mode.  Frames for unknown CIDs are dropped, except that
 * the A2MP CID may lazily create its channel.  Consumes @skb on every
 * path.  The channel is locked on lookup/creation and unlocked at
 * done:.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* l2cap_le_data_rcv() frees skb itself except on -ENOBUFS */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
/* Deliver a connectionless (G-frame) packet to a channel listening on
 * @psm over ACL.  The sender's address and PSM are stashed in the skb
 * control block for recvmsg's msg_name.  Drops the packet for non-ACL
 * links, unknown PSMs, wrong channel states or oversized payloads.
 * Consumes @skb on every path and balances the channel reference taken
 * by l2cap_global_chan_by_psm().
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
/* Route one complete L2CAP frame by CID: signalling, connectionless,
 * LE signalling, or per-channel data.  Frames arriving before the HCI
 * link is fully connected are parked on conn->pending_rx for later
 * processing; malformed lengths and data from blacklisted LE peers are
 * dropped.  Consumes @skb on every path (directly or via the callee).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6922 static void process_pending_rx(struct work_struct *work)
6924 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6925 pending_rx_work);
6926 struct sk_buff *skb;
6928 BT_DBG("");
6930 while ((skb = skb_dequeue(&conn->pending_rx)))
6931 l2cap_recv_frame(conn, skb);
/* Get or create the L2CAP connection state for @hcon.  Returns the
 * existing conn if already attached; otherwise allocates one, attaches
 * an HCI channel, picks the MTU from the link type (LE MTU if set,
 * else ACL MTU), advertises the supported fixed channels, and
 * initialises locks, lists and work items.  Returns NULL on allocation
 * failure (the created hci_chan is rolled back).
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* conn holds a reference on the underlying HCI connection */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* Advertise SMP over BR/EDR when Secure Connections is possible */
	if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7001 static bool is_valid_psm(u16 psm, u8 dst_type) {
7002 if (!psm)
7003 return false;
7005 if (bdaddr_type_is_le(dst_type))
7006 return (psm <= 0x00ff);
7008 /* PSM must be odd and lsb of upper byte must be 0 */
7009 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * addressed by @psm and/or fixed @cid.  Validates the PSM/CID against
 * the channel type and mode, creates (or reuses) the underlying LE or
 * ACL link, binds the channel to the resulting l2cap_conn and starts
 * the connect state machine.
 *
 * Returns 0 on success or if a connect is already in progress;
 * -EISCONN if already connected; -EINVAL/-EOPNOTSUPP/-EBADFD on
 * parameter or state problems; -EBUSY if the dcid is taken; or a
 * negative error from HCI connection setup.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Need a valid PSM or a CID unless this is a raw channel */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, accept the peer as master */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Requested fixed CID must not already be in use on this link */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7174 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* Incoming-connection policy callback from HCI: scan the listening
 * channels and compute the link-mode bits (accept / role switch) for a
 * connection request from @bdaddr.  Channels bound to this adapter's
 * own address take precedence over wildcard (BDADDR_ANY) listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A matching channel must be a fixed channel, listening, bound to the
 * connection's source address (or to BDADDR_ANY) and of the same
 * source address type.  The returned channel holds a reference taken
 * under chan_list_lock; the caller must l2cap_chan_put() it.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Reference is dropped by the caller after use */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
/* HCI connect-complete callback.  On failure the whole L2CAP state for
 * the link is torn down.  On success an l2cap_conn is attached, every
 * listening fixed channel is offered a new child channel (unless a
 * client channel already occupies that CID or the peer is
 * blacklisted), and channel setup proceeds via l2cap_conn_ready().
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the ref that keeps pchan valid */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7299 int l2cap_disconn_ind(struct hci_conn *hcon)
7301 struct l2cap_conn *conn = hcon->l2cap_data;
7303 BT_DBG("hcon %p", hcon);
7305 if (!conn)
7306 return HCI_ERROR_REMOTE_USER_TERM;
7307 return conn->disc_reason;
/* HCI disconnect-complete callback: tear down all L2CAP state for the
 * link, translating the HCI reason code to an errno for the channels.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7317 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7319 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7320 return;
7322 if (encrypt == 0x00) {
7323 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7324 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7325 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7326 chan->sec_level == BT_SECURITY_FIPS)
7327 l2cap_chan_close(chan, ECONNREFUSED);
7328 } else {
7329 if (chan->sec_level == BT_SECURITY_MEDIUM)
7330 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) change callback.  Walks
 * every channel on the connection and advances its state machine:
 * established channels are resumed and re-checked, connecting channels
 * either proceed or get a disconnect timer, and incoming BR/EDR
 * channels in CONNECT2 are answered with a connect response (plus the
 * first config request on success).  A2MP and conn-pending channels
 * are skipped.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels do not follow the security state machine */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				/* With deferred setup, report "pending
				 * authorization" and let userspace decide.
				 */
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right after a successful
			 * response, if not already done.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Entry point for ACL data from HCI: reassemble L2CAP PDUs that were
 * fragmented across ACL packets.  Start fragments must carry a full
 * basic L2CAP header; continuation fragments are appended to
 * conn->rx_skb until the advertised length is reached, then the
 * complete frame is handed to l2cap_recv_frame() (which takes
 * ownership).  Length inconsistencies mark the connection unreliable
 * and drop the partial frame.  Always returns 0; consumes @skb.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is pending means the
		 * previous PDU was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7532 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7534 struct l2cap_chan *c;
7536 read_lock(&chan_list_lock);
7538 list_for_each_entry(c, &chan_list, global_l) {
7539 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7540 &c->src, c->src_type, &c->dst, c->dst_type,
7541 c->state, __le16_to_cpu(c->psm),
7542 c->scid, c->dcid, c->imtu, c->omtu,
7543 c->sec_level, c->mode);
7546 read_unlock(&chan_list_lock);
7548 return 0;
/* Bind the seq_file single-shot show routine to the debugfs file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the "l2cap" debugfs entry (read-only dump). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then (when the
 * Bluetooth debugfs root exists) create the channel-dump file and the
 * tunables for LE credits and MPS.  Returns 0 or the socket-layer
 * error.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	/* debugfs entries are optional; skip quietly if unavailable */
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	return 0;
}
/* Module teardown: remove the debugfs file and unregister sockets. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
/* Expose "disable_ertm" as a writable module parameter so ERTM and
 * streaming modes can be switched off at load time or via sysfs.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");