ALSA: usb-audio: Fix an out-of-bound read in create_composite_quirks
[linux/fpc-iii.git] / net / bluetooth / l2cap_core.c
blobaf68674690af149843d6a97364ca004287b5132e
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
44 #define LE_FLOWCTL_MAX_CREDITS 65535
46 bool disable_ertm;
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 void *data);
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event);
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
68 if (link_type == LE_LINK) {
69 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
71 else
72 return BDADDR_LE_RANDOM;
75 return BDADDR_BREDR;
78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
80 return bdaddr_type(hcon->type, hcon->src_type);
83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
85 return bdaddr_type(hcon->type, hcon->dst_type);
88 /* ---- L2CAP channels ---- */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 u16 cid)
93 struct l2cap_chan *c;
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
99 return NULL;
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
111 return NULL;
114 /* Find channel with given SCID.
115 * Returns locked channel. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 u16 cid)
119 struct l2cap_chan *c;
121 mutex_lock(&conn->chan_lock);
122 c = __l2cap_get_chan_by_scid(conn, cid);
123 if (c)
124 l2cap_chan_lock(c);
125 mutex_unlock(&conn->chan_lock);
127 return c;
130 /* Find channel with given DCID.
131 * Returns locked channel.
133 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
134 u16 cid)
136 struct l2cap_chan *c;
138 mutex_lock(&conn->chan_lock);
139 c = __l2cap_get_chan_by_dcid(conn, cid);
140 if (c)
141 l2cap_chan_lock(c);
142 mutex_unlock(&conn->chan_lock);
144 return c;
147 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 u8 ident)
150 struct l2cap_chan *c;
152 list_for_each_entry(c, &conn->chan_l, list) {
153 if (c->ident == ident)
154 return c;
156 return NULL;
159 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 u8 ident)
162 struct l2cap_chan *c;
164 mutex_lock(&conn->chan_lock);
165 c = __l2cap_get_chan_by_ident(conn, ident);
166 if (c)
167 l2cap_chan_lock(c);
168 mutex_unlock(&conn->chan_lock);
170 return c;
173 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
175 struct l2cap_chan *c;
177 list_for_each_entry(c, &chan_list, global_l) {
178 if (c->sport == psm && !bacmp(&c->src, src))
179 return c;
181 return NULL;
184 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
186 int err;
188 write_lock(&chan_list_lock);
190 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
191 err = -EADDRINUSE;
192 goto done;
195 if (psm) {
196 chan->psm = psm;
197 chan->sport = psm;
198 err = 0;
199 } else {
200 u16 p;
202 err = -EINVAL;
203 for (p = 0x1001; p < 0x1100; p += 2)
204 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
205 chan->psm = cpu_to_le16(p);
206 chan->sport = cpu_to_le16(p);
207 err = 0;
208 break;
212 done:
213 write_unlock(&chan_list_lock);
214 return err;
216 EXPORT_SYMBOL_GPL(l2cap_add_psm);
218 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
220 write_lock(&chan_list_lock);
222 /* Override the defaults (which are for conn-oriented) */
223 chan->omtu = L2CAP_DEFAULT_MTU;
224 chan->chan_type = L2CAP_CHAN_FIXED;
226 chan->scid = scid;
228 write_unlock(&chan_list_lock);
230 return 0;
233 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
235 u16 cid, dyn_end;
237 if (conn->hcon->type == LE_LINK)
238 dyn_end = L2CAP_CID_LE_DYN_END;
239 else
240 dyn_end = L2CAP_CID_DYN_END;
242 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
243 if (!__l2cap_get_chan_by_scid(conn, cid))
244 return cid;
247 return 0;
250 static void l2cap_state_change(struct l2cap_chan *chan, int state)
252 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
253 state_to_string(state));
255 chan->state = state;
256 chan->ops->state_change(chan, state, 0);
259 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
260 int state, int err)
262 chan->state = state;
263 chan->ops->state_change(chan, chan->state, err);
266 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
268 chan->ops->state_change(chan, chan->state, err);
271 static void __set_retrans_timer(struct l2cap_chan *chan)
273 if (!delayed_work_pending(&chan->monitor_timer) &&
274 chan->retrans_timeout) {
275 l2cap_set_timer(chan, &chan->retrans_timer,
276 msecs_to_jiffies(chan->retrans_timeout));
280 static void __set_monitor_timer(struct l2cap_chan *chan)
282 __clear_retrans_timer(chan);
283 if (chan->monitor_timeout) {
284 l2cap_set_timer(chan, &chan->monitor_timer,
285 msecs_to_jiffies(chan->monitor_timeout));
289 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
290 u16 seq)
292 struct sk_buff *skb;
294 skb_queue_walk(head, skb) {
295 if (bt_cb(skb)->l2cap.txseq == seq)
296 return skb;
299 return NULL;
302 /* ---- L2CAP sequence number lists ---- */
304 /* For ERTM, ordered lists of sequence numbers must be tracked for
305 * SREJ requests that are received and for frames that are to be
306 * retransmitted. These seq_list functions implement a singly-linked
307 * list in an array, where membership in the list can also be checked
308 * in constant time. Items can also be added to the tail of the list
309 * and removed from the head in constant time, without further memory
310 * allocs or frees.
313 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
315 size_t alloc_size, i;
317 /* Allocated size is a power of 2 to map sequence numbers
318 * (which may be up to 14 bits) in to a smaller array that is
319 * sized for the negotiated ERTM transmit windows.
321 alloc_size = roundup_pow_of_two(size);
323 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
324 if (!seq_list->list)
325 return -ENOMEM;
327 seq_list->mask = alloc_size - 1;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
330 for (i = 0; i < alloc_size; i++)
331 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
333 return 0;
336 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
338 kfree(seq_list->list);
341 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
342 u16 seq)
344 /* Constant-time check for list membership */
345 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
348 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
350 u16 seq = seq_list->head;
351 u16 mask = seq_list->mask;
353 seq_list->head = seq_list->list[seq & mask];
354 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
356 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
357 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
358 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
361 return seq;
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
366 u16 i;
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 return;
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
389 else
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Channel timer expiry: close the channel with a reason derived from
 * the state it timed out in, then drop the timer's reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	switch (chan->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		reason = ECONNREFUSED;
		break;
	case BT_CONNECT:
		reason = chan->sec_level != BT_SECURITY_SDP ? ECONNREFUSED
							    : ETIMEDOUT;
		break;
	default:
		reason = ETIMEDOUT;
		break;
	}

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
434 mutex_init(&chan->lock);
436 /* Set default lock nesting level */
437 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
439 write_lock(&chan_list_lock);
440 list_add(&chan->global_l, &chan_list);
441 write_unlock(&chan_list_lock);
443 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
445 chan->state = BT_OPEN;
447 kref_init(&chan->kref);
449 /* This flag is cleared in l2cap_chan_ready() */
450 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
452 BT_DBG("chan %p", chan);
454 return chan;
456 EXPORT_SYMBOL_GPL(l2cap_chan_create);
458 static void l2cap_chan_destroy(struct kref *kref)
460 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
462 BT_DBG("chan %p", chan);
464 write_lock(&chan_list_lock);
465 list_del(&chan->global_l);
466 write_unlock(&chan_list_lock);
468 kfree(chan);
471 void l2cap_chan_hold(struct l2cap_chan *c)
473 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
475 kref_get(&c->kref);
478 void l2cap_chan_put(struct l2cap_chan *c)
480 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
482 kref_put(&c->kref, l2cap_chan_destroy);
484 EXPORT_SYMBOL_GPL(l2cap_chan_put);
486 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
488 chan->fcs = L2CAP_FCS_CRC16;
489 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
490 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
491 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
492 chan->remote_max_tx = chan->max_tx;
493 chan->remote_tx_win = chan->tx_win;
494 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
495 chan->sec_level = BT_SECURITY_LOW;
496 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
497 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
498 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
499 chan->conf_state = 0;
501 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
503 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
505 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
507 chan->sdu = NULL;
508 chan->sdu_last_frag = NULL;
509 chan->sdu_len = 0;
510 chan->tx_credits = 0;
511 chan->rx_credits = le_max_credits;
512 chan->mps = min_t(u16, chan->imtu, le_default_mps);
514 skb_queue_head_init(&chan->tx_q);
517 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
519 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
520 __le16_to_cpu(chan->psm), chan->dcid);
522 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
524 chan->conn = conn;
526 switch (chan->chan_type) {
527 case L2CAP_CHAN_CONN_ORIENTED:
528 /* Alloc CID for connection-oriented socket */
529 chan->scid = l2cap_alloc_cid(conn);
530 if (conn->hcon->type == ACL_LINK)
531 chan->omtu = L2CAP_DEFAULT_MTU;
532 break;
534 case L2CAP_CHAN_CONN_LESS:
535 /* Connectionless socket */
536 chan->scid = L2CAP_CID_CONN_LESS;
537 chan->dcid = L2CAP_CID_CONN_LESS;
538 chan->omtu = L2CAP_DEFAULT_MTU;
539 break;
541 case L2CAP_CHAN_FIXED:
542 /* Caller will set CID and CID specific MTU values */
543 break;
545 default:
546 /* Raw socket can send/recv signalling messages only */
547 chan->scid = L2CAP_CID_SIGNALING;
548 chan->dcid = L2CAP_CID_SIGNALING;
549 chan->omtu = L2CAP_DEFAULT_MTU;
552 chan->local_id = L2CAP_BESTEFFORT_ID;
553 chan->local_stype = L2CAP_SERV_BESTEFFORT;
554 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
555 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
556 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
557 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
559 l2cap_chan_hold(chan);
561 /* Only keep a reference for fixed channels if they requested it */
562 if (chan->chan_type != L2CAP_CHAN_FIXED ||
563 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
564 hci_conn_hold(conn->hcon);
566 list_add(&chan->list, &conn->chan_l);
569 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
571 mutex_lock(&conn->chan_lock);
572 __l2cap_chan_add(conn, chan);
573 mutex_unlock(&conn->chan_lock);
576 void l2cap_chan_del(struct l2cap_chan *chan, int err)
578 struct l2cap_conn *conn = chan->conn;
580 __clear_chan_timer(chan);
582 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
583 state_to_string(chan->state));
585 chan->ops->teardown(chan, err);
587 if (conn) {
588 struct amp_mgr *mgr = conn->hcon->amp_mgr;
589 /* Delete from channel list */
590 list_del(&chan->list);
592 l2cap_chan_put(chan);
594 chan->conn = NULL;
596 /* Reference was only held for non-fixed channels or
597 * fixed channels that explicitly requested it using the
598 * FLAG_HOLD_HCI_CONN flag.
600 if (chan->chan_type != L2CAP_CHAN_FIXED ||
601 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
602 hci_conn_drop(conn->hcon);
604 if (mgr && mgr->bredr_chan == chan)
605 mgr->bredr_chan = NULL;
608 if (chan->hs_hchan) {
609 struct hci_chan *hs_hchan = chan->hs_hchan;
611 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
612 amp_disconnect_logical_link(hs_hchan);
615 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
616 return;
618 switch(chan->mode) {
619 case L2CAP_MODE_BASIC:
620 break;
622 case L2CAP_MODE_LE_FLOWCTL:
623 skb_queue_purge(&chan->tx_q);
624 break;
626 case L2CAP_MODE_ERTM:
627 __clear_retrans_timer(chan);
628 __clear_monitor_timer(chan);
629 __clear_ack_timer(chan);
631 skb_queue_purge(&chan->srej_q);
633 l2cap_seq_list_free(&chan->srej_list);
634 l2cap_seq_list_free(&chan->retrans_list);
636 /* fall through */
638 case L2CAP_MODE_STREAMING:
639 skb_queue_purge(&chan->tx_q);
640 break;
643 return;
645 EXPORT_SYMBOL_GPL(l2cap_chan_del);
647 static void l2cap_conn_update_id_addr(struct work_struct *work)
649 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
650 id_addr_update_work);
651 struct hci_conn *hcon = conn->hcon;
652 struct l2cap_chan *chan;
654 mutex_lock(&conn->chan_lock);
656 list_for_each_entry(chan, &conn->chan_l, list) {
657 l2cap_chan_lock(chan);
658 bacpy(&chan->dst, &hcon->dst);
659 chan->dst_type = bdaddr_dst_type(hcon);
660 l2cap_chan_unlock(chan);
663 mutex_unlock(&conn->chan_lock);
666 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
668 struct l2cap_conn *conn = chan->conn;
669 struct l2cap_le_conn_rsp rsp;
670 u16 result;
672 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
673 result = L2CAP_CR_AUTHORIZATION;
674 else
675 result = L2CAP_CR_BAD_PSM;
677 l2cap_state_change(chan, BT_DISCONN);
679 rsp.dcid = cpu_to_le16(chan->scid);
680 rsp.mtu = cpu_to_le16(chan->imtu);
681 rsp.mps = cpu_to_le16(chan->mps);
682 rsp.credits = cpu_to_le16(chan->rx_credits);
683 rsp.result = cpu_to_le16(result);
685 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
686 &rsp);
689 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
691 struct l2cap_conn *conn = chan->conn;
692 struct l2cap_conn_rsp rsp;
693 u16 result;
695 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
696 result = L2CAP_CR_SEC_BLOCK;
697 else
698 result = L2CAP_CR_BAD_PSM;
700 l2cap_state_change(chan, BT_DISCONN);
702 rsp.scid = cpu_to_le16(chan->dcid);
703 rsp.dcid = cpu_to_le16(chan->scid);
704 rsp.result = cpu_to_le16(result);
705 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
707 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
710 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
712 struct l2cap_conn *conn = chan->conn;
714 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
716 switch (chan->state) {
717 case BT_LISTEN:
718 chan->ops->teardown(chan, 0);
719 break;
721 case BT_CONNECTED:
722 case BT_CONFIG:
723 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
724 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
725 l2cap_send_disconn_req(chan, reason);
726 } else
727 l2cap_chan_del(chan, reason);
728 break;
730 case BT_CONNECT2:
731 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
732 if (conn->hcon->type == ACL_LINK)
733 l2cap_chan_connect_reject(chan);
734 else if (conn->hcon->type == LE_LINK)
735 l2cap_chan_le_connect_reject(chan);
738 l2cap_chan_del(chan, reason);
739 break;
741 case BT_CONNECT:
742 case BT_DISCONN:
743 l2cap_chan_del(chan, reason);
744 break;
746 default:
747 chan->ops->teardown(chan, 0);
748 break;
751 EXPORT_SYMBOL(l2cap_chan_close);
753 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
755 switch (chan->chan_type) {
756 case L2CAP_CHAN_RAW:
757 switch (chan->sec_level) {
758 case BT_SECURITY_HIGH:
759 case BT_SECURITY_FIPS:
760 return HCI_AT_DEDICATED_BONDING_MITM;
761 case BT_SECURITY_MEDIUM:
762 return HCI_AT_DEDICATED_BONDING;
763 default:
764 return HCI_AT_NO_BONDING;
766 break;
767 case L2CAP_CHAN_CONN_LESS:
768 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
769 if (chan->sec_level == BT_SECURITY_LOW)
770 chan->sec_level = BT_SECURITY_SDP;
772 if (chan->sec_level == BT_SECURITY_HIGH ||
773 chan->sec_level == BT_SECURITY_FIPS)
774 return HCI_AT_NO_BONDING_MITM;
775 else
776 return HCI_AT_NO_BONDING;
777 break;
778 case L2CAP_CHAN_CONN_ORIENTED:
779 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
780 if (chan->sec_level == BT_SECURITY_LOW)
781 chan->sec_level = BT_SECURITY_SDP;
783 if (chan->sec_level == BT_SECURITY_HIGH ||
784 chan->sec_level == BT_SECURITY_FIPS)
785 return HCI_AT_NO_BONDING_MITM;
786 else
787 return HCI_AT_NO_BONDING;
789 /* fall through */
790 default:
791 switch (chan->sec_level) {
792 case BT_SECURITY_HIGH:
793 case BT_SECURITY_FIPS:
794 return HCI_AT_GENERAL_BONDING_MITM;
795 case BT_SECURITY_MEDIUM:
796 return HCI_AT_GENERAL_BONDING;
797 default:
798 return HCI_AT_NO_BONDING;
800 break;
804 /* Service level security */
805 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
807 struct l2cap_conn *conn = chan->conn;
808 __u8 auth_type;
810 if (conn->hcon->type == LE_LINK)
811 return smp_conn_security(conn->hcon, chan->sec_level);
813 auth_type = l2cap_get_auth_type(chan);
815 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
816 initiator);
819 static u8 l2cap_get_ident(struct l2cap_conn *conn)
821 u8 id;
823 /* Get next available identificator.
824 * 1 - 128 are used by kernel.
825 * 129 - 199 are reserved.
826 * 200 - 254 are used by utilities like l2ping, etc.
829 mutex_lock(&conn->ident_lock);
831 if (++conn->tx_ident > 128)
832 conn->tx_ident = 1;
834 id = conn->tx_ident;
836 mutex_unlock(&conn->ident_lock);
838 return id;
841 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
842 void *data)
844 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
845 u8 flags;
847 BT_DBG("code 0x%2.2x", code);
849 if (!skb)
850 return;
852 /* Use NO_FLUSH if supported or we have an LE link (which does
853 * not support auto-flushing packets) */
854 if (lmp_no_flush_capable(conn->hcon->hdev) ||
855 conn->hcon->type == LE_LINK)
856 flags = ACL_START_NO_FLUSH;
857 else
858 flags = ACL_START;
860 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
861 skb->priority = HCI_PRIO_MAX;
863 hci_send_acl(conn->hchan, skb, flags);
866 static bool __chan_is_moving(struct l2cap_chan *chan)
868 return chan->move_state != L2CAP_MOVE_STABLE &&
869 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
872 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
874 struct hci_conn *hcon = chan->conn->hcon;
875 u16 flags;
877 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
878 skb->priority);
880 if (chan->hs_hcon && !__chan_is_moving(chan)) {
881 if (chan->hs_hchan)
882 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
883 else
884 kfree_skb(skb);
886 return;
889 /* Use NO_FLUSH for LE links (where this is the only option) or
890 * if the BR/EDR link supports it and flushing has not been
891 * explicitly requested (through FLAG_FLUSHABLE).
893 if (hcon->type == LE_LINK ||
894 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
895 lmp_no_flush_capable(hcon->hdev)))
896 flags = ACL_START_NO_FLUSH;
897 else
898 flags = ACL_START;
900 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
901 hci_send_acl(chan->conn->hchan, skb, flags);
904 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
906 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
907 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
909 if (enh & L2CAP_CTRL_FRAME_TYPE) {
910 /* S-Frame */
911 control->sframe = 1;
912 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
913 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
915 control->sar = 0;
916 control->txseq = 0;
917 } else {
918 /* I-Frame */
919 control->sframe = 0;
920 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
921 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
923 control->poll = 0;
924 control->super = 0;
928 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
930 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
931 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
933 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
934 /* S-Frame */
935 control->sframe = 1;
936 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
937 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
939 control->sar = 0;
940 control->txseq = 0;
941 } else {
942 /* I-Frame */
943 control->sframe = 0;
944 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
945 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
947 control->poll = 0;
948 control->super = 0;
952 static inline void __unpack_control(struct l2cap_chan *chan,
953 struct sk_buff *skb)
955 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
956 __unpack_extended_control(get_unaligned_le32(skb->data),
957 &bt_cb(skb)->l2cap);
958 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
959 } else {
960 __unpack_enhanced_control(get_unaligned_le16(skb->data),
961 &bt_cb(skb)->l2cap);
962 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
966 static u32 __pack_extended_control(struct l2cap_ctrl *control)
968 u32 packed;
970 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
971 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
973 if (control->sframe) {
974 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
975 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
976 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
977 } else {
978 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
979 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
982 return packed;
985 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
987 u16 packed;
989 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
990 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
992 if (control->sframe) {
993 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
994 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
995 packed |= L2CAP_CTRL_FRAME_TYPE;
996 } else {
997 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
998 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1001 return packed;
1004 static inline void __pack_control(struct l2cap_chan *chan,
1005 struct l2cap_ctrl *control,
1006 struct sk_buff *skb)
1008 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1009 put_unaligned_le32(__pack_extended_control(control),
1010 skb->data + L2CAP_HDR_SIZE);
1011 } else {
1012 put_unaligned_le16(__pack_enhanced_control(control),
1013 skb->data + L2CAP_HDR_SIZE);
1017 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1019 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1020 return L2CAP_EXT_HDR_SIZE;
1021 else
1022 return L2CAP_ENH_HDR_SIZE;
1025 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1026 u32 control)
1028 struct sk_buff *skb;
1029 struct l2cap_hdr *lh;
1030 int hlen = __ertm_hdr_size(chan);
1032 if (chan->fcs == L2CAP_FCS_CRC16)
1033 hlen += L2CAP_FCS_SIZE;
1035 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1037 if (!skb)
1038 return ERR_PTR(-ENOMEM);
1040 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1041 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1042 lh->cid = cpu_to_le16(chan->dcid);
1044 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1045 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1046 else
1047 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1049 if (chan->fcs == L2CAP_FCS_CRC16) {
1050 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1051 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1054 skb->priority = HCI_PRIO_MAX;
1055 return skb;
1058 static void l2cap_send_sframe(struct l2cap_chan *chan,
1059 struct l2cap_ctrl *control)
1061 struct sk_buff *skb;
1062 u32 control_field;
1064 BT_DBG("chan %p, control %p", chan, control);
1066 if (!control->sframe)
1067 return;
1069 if (__chan_is_moving(chan))
1070 return;
1072 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1073 !control->poll)
1074 control->final = 1;
1076 if (control->super == L2CAP_SUPER_RR)
1077 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1078 else if (control->super == L2CAP_SUPER_RNR)
1079 set_bit(CONN_RNR_SENT, &chan->conn_state);
1081 if (control->super != L2CAP_SUPER_SREJ) {
1082 chan->last_acked_seq = control->reqseq;
1083 __clear_ack_timer(chan);
1086 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1087 control->final, control->poll, control->super);
1089 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1090 control_field = __pack_extended_control(control);
1091 else
1092 control_field = __pack_enhanced_control(control);
1094 skb = l2cap_create_sframe_pdu(chan, control_field);
1095 if (!IS_ERR(skb))
1096 l2cap_do_send(chan, skb);
1099 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1101 struct l2cap_ctrl control;
1103 BT_DBG("chan %p, poll %d", chan, poll);
1105 memset(&control, 0, sizeof(control));
1106 control.sframe = 1;
1107 control.poll = poll;
1109 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1110 control.super = L2CAP_SUPER_RNR;
1111 else
1112 control.super = L2CAP_SUPER_RR;
1114 control.reqseq = chan->buffer_seq;
1115 l2cap_send_sframe(chan, &control);
1118 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1120 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1121 return true;
1123 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1126 static bool __amp_capable(struct l2cap_chan *chan)
1128 struct l2cap_conn *conn = chan->conn;
1129 struct hci_dev *hdev;
1130 bool amp_available = false;
1132 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1133 return false;
1135 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1136 return false;
1138 read_lock(&hci_dev_list_lock);
1139 list_for_each_entry(hdev, &hci_dev_list, list) {
1140 if (hdev->amp_type != AMP_TYPE_BREDR &&
1141 test_bit(HCI_UP, &hdev->flags)) {
1142 amp_available = true;
1143 break;
1146 read_unlock(&hci_dev_list_lock);
1148 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1149 return amp_available;
1151 return false;
1154 static bool l2cap_check_efs(struct l2cap_chan *chan)
1156 /* Check EFS parameters */
1157 return true;
1160 void l2cap_send_conn_req(struct l2cap_chan *chan)
1162 struct l2cap_conn *conn = chan->conn;
1163 struct l2cap_conn_req req;
1165 req.scid = cpu_to_le16(chan->scid);
1166 req.psm = chan->psm;
1168 chan->ident = l2cap_get_ident(conn);
1170 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1172 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1175 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1177 struct l2cap_create_chan_req req;
1178 req.scid = cpu_to_le16(chan->scid);
1179 req.psm = chan->psm;
1180 req.amp_id = amp_id;
1182 chan->ident = l2cap_get_ident(chan->conn);
1184 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1185 sizeof(req), &req);
1188 static void l2cap_move_setup(struct l2cap_chan *chan)
1190 struct sk_buff *skb;
1192 BT_DBG("chan %p", chan);
1194 if (chan->mode != L2CAP_MODE_ERTM)
1195 return;
1197 __clear_retrans_timer(chan);
1198 __clear_monitor_timer(chan);
1199 __clear_ack_timer(chan);
1201 chan->retry_count = 0;
1202 skb_queue_walk(&chan->tx_q, skb) {
1203 if (bt_cb(skb)->l2cap.retries)
1204 bt_cb(skb)->l2cap.retries = 1;
1205 else
1206 break;
1209 chan->expected_tx_seq = chan->buffer_seq;
1211 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1212 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1213 l2cap_seq_list_clear(&chan->retrans_list);
1214 l2cap_seq_list_clear(&chan->srej_list);
1215 skb_queue_purge(&chan->srej_q);
1217 chan->tx_state = L2CAP_TX_STATE_XMIT;
1218 chan->rx_state = L2CAP_RX_STATE_MOVE;
1220 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1223 static void l2cap_move_done(struct l2cap_chan *chan)
1225 u8 move_role = chan->move_role;
1226 BT_DBG("chan %p", chan);
1228 chan->move_state = L2CAP_MOVE_STABLE;
1229 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1231 if (chan->mode != L2CAP_MODE_ERTM)
1232 return;
1234 switch (move_role) {
1235 case L2CAP_MOVE_ROLE_INITIATOR:
1236 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1237 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1238 break;
1239 case L2CAP_MOVE_ROLE_RESPONDER:
1240 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1241 break;
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ready() callback. Safe to call more than once.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* LE flow-control channels with no send credits start suspended */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
/* Send an LE credit-based connection request for this channel. The
 * FLAG_LE_CONN_REQ_SENT bit guarantees the request is only sent once.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
/* Start an LE channel once link security permits: fixed channels
 * (no PSM) become ready immediately, connection-oriented channels in
 * BT_CONNECT send the LE connection request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}
/* Kick off channel establishment on the appropriate transport:
 * AMP discovery, the LE connect path, or a BR/EDR connection request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
/* Send a single Information Request (feature mask) on the connection
 * and arm the info timer; a no-op if one was already sent.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* Give up waiting for the response after L2CAP_INFO_TIMEOUT */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
/* Start channel establishment, first making sure the remote feature
 * mask is known (BR/EDR) and the security requirements are satisfied.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask must be fetched before connecting; the pending
	 * channel is restarted from l2cap_conn_start() when it arrives.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
/* Return non-zero when @mode (ERTM or streaming) is supported by both
 * the local stack and the remote feature mask @feat_mask.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}
/* Tear down a channel: stop ERTM timers, send an L2CAP Disconnection
 * Request (except for A2MP channels, which have no such procedure) and
 * move the channel to BT_DISCONN with @err as the error.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* A2MP channels are torn down without a disconnect request */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
/* ---- L2CAP connections ---- */

/* Walk all channels on @conn and advance their connection state:
 * start outgoing BT_CONNECT channels and answer incoming BT_CONNECT2
 * channels, honoring security and deferred-setup requirements.
 * Called e.g. once the remote feature mask becomes known.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Non connection-oriented channels are ready right away */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode the link
			 * cannot provide (CONF_STATE2_DEVICE = mandatory).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept/reject first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Authentication still in progress */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Success: immediately begin configuration */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
/* LE-specific post-connect processing: elevate security for outgoing
 * pairing and, as slave, request a connection parameter update if the
 * current interval is outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minium and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
/* Called when the underlying link comes up: start the info request on
 * ACL links, advance every channel on the connection, run LE-specific
 * setup, and release any RX frames queued while the link was forming.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP manager, skip them */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels only wait for the feature mask */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames received before the connection was fully up */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded reliability are signalled */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
/* Info request timed out: treat the feature mask exchange as done
 * (with whatever defaults we have) and restart pending channels.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
/* Register an external l2cap_user on @conn. Returns 0 on success,
 * -EINVAL if @user is already on a list, -ENODEV if the connection is
 * already being torn down, or the error from user->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() is unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user; invokes its ->remove
 * callback under the hci_dev lock. No-op if already unregistered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from @conn, calling each remove
 * callback. Used while tearing the connection down.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
/* Destroy the L2CAP connection attached to @hcon: flush pending RX,
 * detach users, close every channel with error @err, and release the
 * HCI channel plus the connection's own reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ops->close can run after chan_del */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
/* kref release callback: drop the hci_conn reference and free the
 * l2cap_conn itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
/* Take a reference on @conn and return it (for call chaining). */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Channel's source address type must match the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
/* ERTM monitor timer expired: feed the MONITOR_TO event into the tx
 * state machine. Drops the channel reference the timer was holding.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may have been disconnected while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* ERTM retransmission timer expired: feed the RETRANS_TO event into
 * the tx state machine. Drops the channel reference the timer held.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may have been disconnected while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Transmit all queued frames in streaming mode: stamp sequence
 * numbers, append FCS if enabled, and send without waiting for acks.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
/* Transmit as many pending I-frames as the ERTM transmit window and
 * state allow. Returns the number of frames sent, 0 when blocked
 * (remote busy, moving, window full), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges received frames */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
/* Retransmit every sequence number queued on chan->retrans_list,
 * rewriting the control field and FCS in place. Disconnects the
 * channel when a frame exceeds the max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
/* Retransmit the single frame identified by control->reqseq. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
/* Retransmit all unacknowledged frames starting from control->reqseq
 * up to (but not including) the next frame to be sent.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first unacked frame (reqseq) ... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ... then queue everything up to tx_send_head for resend */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * piggyback acks on outgoing I-frames, send an RR once the receive
 * window is 3/4 full, or arm the ack timer to ack later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
/* Copy @len bytes of user data from @msg into @skb, placing @count
 * bytes in the head skb and the rest into MTU-sized fragment skbs
 * chained on frag_list. Returns bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the PSM and the user payload. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode (B-frame) PDU: L2CAP header plus the user
 * payload. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at send time), optional SDU length (@sdulen non-zero
 * marks a SAR start fragment), payload, and room for the FCS.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs queued on
 * @seg_queue, tagging each with the proper SAR value. Returns 0 or a
 * negative errno (the queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
/* Build an LE credit-based flow control PDU: L2CAP header, optional
 * SDU length (@sdulen non-zero on the first PDU), then the payload.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
/* Segment an SDU into LE flow-control PDUs on @seg_queue; only the
 * first PDU carries the total SDU length. Returns 0 or a negative
 * errno (the queue is purged on failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Later PDUs omit the SDU length field, gaining
			 * L2CAP_SDULEN_SIZE bytes of payload space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
/* Send user data on @chan, dispatching on the channel mode
 * (connectionless, LE flow control, basic, ERTM or streaming).
 * Returns the number of bytes queued/sent or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Each PDU sent consumes one peer credit */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq, remembering each on the srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames that already arrived out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2573 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2575 struct l2cap_ctrl control;
2577 BT_DBG("chan %p", chan);
2579 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2580 return;
2582 memset(&control, 0, sizeof(control));
2583 control.sframe = 1;
2584 control.super = L2CAP_SUPER_SREJ;
2585 control.reqseq = chan->srej_list.tail;
2586 l2cap_send_sframe(chan, &control);
2589 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2591 struct l2cap_ctrl control;
2592 u16 initial_head;
2593 u16 seq;
2595 BT_DBG("chan %p, txseq %u", chan, txseq);
2597 memset(&control, 0, sizeof(control));
2598 control.sframe = 1;
2599 control.super = L2CAP_SUPER_SREJ;
2601 /* Capture initial list head to allow only one pass through the list. */
2602 initial_head = chan->srej_list.head;
2604 do {
2605 seq = l2cap_seq_list_pop(&chan->srej_list);
2606 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2607 break;
2609 control.reqseq = seq;
2610 l2cap_send_sframe(chan, &control);
2611 l2cap_seq_list_append(&chan->srej_list, seq);
2612 } while (chan->srej_list.head != initial_head);
2615 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2617 struct sk_buff *acked_skb;
2618 u16 ackseq;
2620 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2622 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2623 return;
2625 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2626 chan->expected_ack_seq, chan->unacked_frames);
2628 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2629 ackseq = __next_seq(chan, ackseq)) {
2631 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2632 if (acked_skb) {
2633 skb_unlink(acked_skb, &chan->tx_q);
2634 kfree_skb(acked_skb);
2635 chan->unacked_frames--;
2639 chan->expected_ack_seq = reqseq;
2641 if (chan->unacked_frames == 0)
2642 __clear_retrans_timer(chan);
2644 BT_DBG("unacked_frames %u", chan->unacked_frames);
2647 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2649 BT_DBG("chan %p", chan);
2651 chan->expected_tx_seq = chan->buffer_seq;
2652 l2cap_seq_list_clear(&chan->srej_list);
2653 skb_queue_purge(&chan->srej_q);
2654 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine, XMIT state: handle a TX-side event while
 * the channel is allowed to send new I-frames.  Events that start a poll
 * (explicit poll, retransmission timeout) move the channel to WAIT_F.
 */
2657 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2658 struct l2cap_ctrl *control,
2659 struct sk_buff_head *skbs, u8 event)
2661 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2662 event);
2664 switch (event) {
/* New data from the channel owner: splice it onto tx_q and try to send. */
2665 case L2CAP_EV_DATA_REQUEST:
2666 if (chan->tx_send_head == NULL)
2667 chan->tx_send_head = skb_peek(skbs);
2669 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2670 l2cap_ertm_send(chan);
2671 break;
2672 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2673 BT_DBG("Enter LOCAL_BUSY");
2674 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2676 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2677 /* The SREJ_SENT state must be aborted if we are to
2678 * enter the LOCAL_BUSY state.
2680 l2cap_abort_rx_srej_sent(chan);
2683 l2cap_send_ack(chan);
2685 break;
2686 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2687 BT_DBG("Exit LOCAL_BUSY");
2688 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If an RNR was sent while busy, poll the peer with RR(P=1) and wait
 * for the final bit in WAIT_F.
 */
2690 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2691 struct l2cap_ctrl local_control;
2693 memset(&local_control, 0, sizeof(local_control));
2694 local_control.sframe = 1;
2695 local_control.super = L2CAP_SUPER_RR;
2696 local_control.poll = 1;
2697 local_control.reqseq = chan->buffer_seq;
2698 l2cap_send_sframe(chan, &local_control);
2700 chan->retry_count = 1;
2701 __set_monitor_timer(chan);
2702 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2704 break;
2705 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2706 l2cap_process_reqseq(chan, control->reqseq);
2707 break;
/* Both poll paths arm the monitor timer and enter WAIT_F. */
2708 case L2CAP_EV_EXPLICIT_POLL:
2709 l2cap_send_rr_or_rnr(chan, 1);
2710 chan->retry_count = 1;
2711 __set_monitor_timer(chan);
2712 __clear_ack_timer(chan);
2713 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2714 break;
2715 case L2CAP_EV_RETRANS_TO:
2716 l2cap_send_rr_or_rnr(chan, 1);
2717 chan->retry_count = 1;
2718 __set_monitor_timer(chan);
2719 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2720 break;
2721 case L2CAP_EV_RECV_FBIT:
2722 /* Nothing to process */
2723 break;
2724 default:
2725 break;
/* ERTM transmit state machine, WAIT_F state: a poll is outstanding and the
 * channel is waiting for an S/I-frame with the final bit set.  New data is
 * queued but not transmitted until the poll completes.
 */
2729 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2730 struct l2cap_ctrl *control,
2731 struct sk_buff_head *skbs, u8 event)
2733 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2734 event);
2736 switch (event) {
2737 case L2CAP_EV_DATA_REQUEST:
2738 if (chan->tx_send_head == NULL)
2739 chan->tx_send_head = skb_peek(skbs);
2740 /* Queue data, but don't send. */
2741 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2742 break;
2743 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2744 BT_DBG("Enter LOCAL_BUSY");
2745 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2747 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2748 /* The SREJ_SENT state must be aborted if we are to
2749 * enter the LOCAL_BUSY state.
2751 l2cap_abort_rx_srej_sent(chan);
2754 l2cap_send_ack(chan);
2756 break;
2757 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2758 BT_DBG("Exit LOCAL_BUSY");
2759 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* Busy cleared while an RNR is outstanding: restart the poll with
 * RR(P=1) and stay in WAIT_F for the final bit.
 */
2761 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2762 struct l2cap_ctrl local_control;
2763 memset(&local_control, 0, sizeof(local_control));
2764 local_control.sframe = 1;
2765 local_control.super = L2CAP_SUPER_RR;
2766 local_control.poll = 1;
2767 local_control.reqseq = chan->buffer_seq;
2768 l2cap_send_sframe(chan, &local_control);
2770 chan->retry_count = 1;
2771 __set_monitor_timer(chan);
2772 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2774 break;
2775 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2776 l2cap_process_reqseq(chan, control->reqseq);
2778 /* Fall through */
2780 case L2CAP_EV_RECV_FBIT:
/* The final bit answers our poll: stop the monitor timer, rearm the
 * retransmission timer if frames are still unacked, and resume XMIT.
 */
2781 if (control && control->final) {
2782 __clear_monitor_timer(chan);
2783 if (chan->unacked_frames > 0)
2784 __set_retrans_timer(chan);
2785 chan->retry_count = 0;
2786 chan->tx_state = L2CAP_TX_STATE_XMIT;
2787 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2789 break;
2790 case L2CAP_EV_EXPLICIT_POLL:
2791 /* Ignore */
2792 break;
/* Monitor timeout: re-poll until max_tx is exhausted (0 = unlimited),
 * then tear the channel down.
 */
2793 case L2CAP_EV_MONITOR_TO:
2794 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2795 l2cap_send_rr_or_rnr(chan, 1);
2796 __set_monitor_timer(chan);
2797 chan->retry_count++;
2798 } else {
2799 l2cap_send_disconn_req(chan, ECONNABORTED);
2801 break;
2802 default:
2803 break;
2807 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2808 struct sk_buff_head *skbs, u8 event)
2810 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2811 chan, control, skbs, event, chan->tx_state);
2813 switch (chan->tx_state) {
2814 case L2CAP_TX_STATE_XMIT:
2815 l2cap_tx_state_xmit(chan, control, skbs, event);
2816 break;
2817 case L2CAP_TX_STATE_WAIT_F:
2818 l2cap_tx_state_wait_f(chan, control, skbs, event);
2819 break;
2820 default:
2821 /* Ignore event */
2822 break;
2826 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2827 struct l2cap_ctrl *control)
2829 BT_DBG("chan %p, control %p", chan, control);
2830 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2833 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2834 struct l2cap_ctrl *control)
2836 BT_DBG("chan %p, control %p", chan, control);
2837 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2840 /* Copy frame to all raw sockets on that connection */
2841 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2843 struct sk_buff *nskb;
2844 struct l2cap_chan *chan;
2846 BT_DBG("conn %p", conn);
2848 mutex_lock(&conn->chan_lock);
2850 list_for_each_entry(chan, &conn->chan_l, list) {
2851 if (chan->chan_type != L2CAP_CHAN_RAW)
2852 continue;
2854 /* Don't send frame to the channel it came from */
2855 if (bt_cb(skb)->l2cap.chan == chan)
2856 continue;
2858 nskb = skb_clone(skb, GFP_KERNEL);
2859 if (!nskb)
2860 continue;
2861 if (chan->ops->recv(chan, nskb))
2862 kfree_skb(nskb);
2865 mutex_unlock(&conn->chan_lock);
2868 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU (L2CAP header + command header + payload).
 * If the payload does not fit in conn->mtu, the remainder is carried in
 * continuation fragments chained via frag_list.  Returns NULL on allocation
 * failure or if the MTU cannot even hold the headers.
 */
2869 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2870 u8 ident, u16 dlen, void *data)
2872 struct sk_buff *skb, **frag;
2873 struct l2cap_cmd_hdr *cmd;
2874 struct l2cap_hdr *lh;
2875 int len, count;
2877 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2878 conn, code, ident, dlen);
2880 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2881 return NULL;
/* Total on-air length; the first skb carries at most conn->mtu of it. */
2883 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2884 count = min_t(unsigned int, conn->mtu, len);
2886 skb = bt_skb_alloc(count, GFP_KERNEL);
2887 if (!skb)
2888 return NULL;
2890 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2891 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
2893 if (conn->hcon->type == LE_LINK)
2894 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2895 else
2896 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2898 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2899 cmd->code = code;
2900 cmd->ident = ident;
2901 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits after the headers in the first skb. */
2903 if (dlen) {
2904 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2905 memcpy(skb_put(skb, count), data, count);
2906 data += count;
2909 len -= skb->len;
2911 /* Continuation fragments (no L2CAP header) */
2912 frag = &skb_shinfo(skb)->frag_list;
2913 while (len) {
2914 count = min_t(unsigned int, conn->mtu, len);
2916 *frag = bt_skb_alloc(count, GFP_KERNEL);
2917 if (!*frag)
2918 goto fail;
2920 memcpy(skb_put(*frag, count), data, count);
2922 len -= count;
2923 data += count;
2925 frag = &(*frag)->next;
2928 return skb;
/* kfree_skb() releases the head skb and any fragments already chained. */
2930 fail:
2931 kfree_skb(skb);
2932 return NULL;
2935 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2936 unsigned long *val)
2938 struct l2cap_conf_opt *opt = *ptr;
2939 int len;
2941 len = L2CAP_CONF_OPT_SIZE + opt->len;
2942 *ptr += len;
2944 *type = opt->type;
2945 *olen = opt->len;
2947 switch (opt->len) {
2948 case 1:
2949 *val = *((u8 *) opt->val);
2950 break;
2952 case 2:
2953 *val = get_unaligned_le16(opt->val);
2954 break;
2956 case 4:
2957 *val = get_unaligned_le32(opt->val);
2958 break;
2960 default:
2961 *val = (unsigned long) opt->val;
2962 break;
2965 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2966 return len;
2969 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
2971 struct l2cap_conf_opt *opt = *ptr;
2973 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2975 if (size < L2CAP_CONF_OPT_SIZE + len)
2976 return;
2978 opt->type = type;
2979 opt->len = len;
2981 switch (len) {
2982 case 1:
2983 *((u8 *) opt->val) = val;
2984 break;
2986 case 2:
2987 put_unaligned_le16(val, opt->val);
2988 break;
2990 case 4:
2991 put_unaligned_le32(val, opt->val);
2992 break;
2994 default:
2995 memcpy(opt->val, (void *) val, len);
2996 break;
2999 *ptr += L2CAP_CONF_OPT_SIZE + len;
3002 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3004 struct l2cap_conf_efs efs;
3006 switch (chan->mode) {
3007 case L2CAP_MODE_ERTM:
3008 efs.id = chan->local_id;
3009 efs.stype = chan->local_stype;
3010 efs.msdu = cpu_to_le16(chan->local_msdu);
3011 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3012 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3013 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3014 break;
3016 case L2CAP_MODE_STREAMING:
3017 efs.id = 1;
3018 efs.stype = L2CAP_SERV_BESTEFFORT;
3019 efs.msdu = cpu_to_le16(chan->local_msdu);
3020 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3021 efs.acc_lat = 0;
3022 efs.flush_to = 0;
3023 break;
3025 default:
3026 return;
3029 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3030 (unsigned long) &efs, size);
3033 static void l2cap_ack_timeout(struct work_struct *work)
3035 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3036 ack_timer.work);
3037 u16 frames_to_ack;
3039 BT_DBG("chan %p", chan);
3041 l2cap_chan_lock(chan);
3043 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3044 chan->last_acked_seq);
3046 if (frames_to_ack)
3047 l2cap_send_rr_or_rnr(chan, 0);
3049 l2cap_chan_unlock(chan);
3050 l2cap_chan_put(chan);
3053 int l2cap_ertm_init(struct l2cap_chan *chan)
3055 int err;
3057 chan->next_tx_seq = 0;
3058 chan->expected_tx_seq = 0;
3059 chan->expected_ack_seq = 0;
3060 chan->unacked_frames = 0;
3061 chan->buffer_seq = 0;
3062 chan->frames_sent = 0;
3063 chan->last_acked_seq = 0;
3064 chan->sdu = NULL;
3065 chan->sdu_last_frag = NULL;
3066 chan->sdu_len = 0;
3068 skb_queue_head_init(&chan->tx_q);
3070 chan->local_amp_id = AMP_ID_BREDR;
3071 chan->move_id = AMP_ID_BREDR;
3072 chan->move_state = L2CAP_MOVE_STABLE;
3073 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3075 if (chan->mode != L2CAP_MODE_ERTM)
3076 return 0;
3078 chan->rx_state = L2CAP_RX_STATE_RECV;
3079 chan->tx_state = L2CAP_TX_STATE_XMIT;
3081 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3082 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3083 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3085 skb_queue_head_init(&chan->srej_q);
3087 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3088 if (err < 0)
3089 return err;
3091 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3092 if (err < 0)
3093 l2cap_seq_list_free(&chan->srej_list);
3095 return err;
3098 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3100 switch (mode) {
3101 case L2CAP_MODE_STREAMING:
3102 case L2CAP_MODE_ERTM:
3103 if (l2cap_mode_supported(mode, remote_feat_mask))
3104 return mode;
3105 /* fall through */
3106 default:
3107 return L2CAP_MODE_BASIC;
3111 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3113 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3114 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3117 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3119 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3120 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3123 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3124 struct l2cap_conf_rfc *rfc)
3126 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3127 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3129 /* Class 1 devices have must have ERTM timeouts
3130 * exceeding the Link Supervision Timeout. The
3131 * default Link Supervision Timeout for AMP
3132 * controllers is 10 seconds.
3134 * Class 1 devices use 0xffffffff for their
3135 * best-effort flush timeout, so the clamping logic
3136 * will result in a timeout that meets the above
3137 * requirement. ERTM timeouts are 16-bit values, so
3138 * the maximum timeout is 65.535 seconds.
3141 /* Convert timeout to milliseconds and round */
3142 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3144 /* This is the recommended formula for class 2 devices
3145 * that start ERTM timers when packets are sent to the
3146 * controller.
3148 ertm_to = 3 * ertm_to + 500;
3150 if (ertm_to > 0xffff)
3151 ertm_to = 0xffff;
3153 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3154 rfc->monitor_timeout = rfc->retrans_timeout;
3155 } else {
3156 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3157 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3161 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3163 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3164 __l2cap_ews_supported(chan->conn)) {
3165 /* use extended control field */
3166 set_bit(FLAG_EXT_CTRL, &chan->flags);
3167 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3168 } else {
3169 chan->tx_win = min_t(u16, chan->tx_win,
3170 L2CAP_DEFAULT_TX_WINDOW);
3171 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3173 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request for @chan into @data (capacity
 * @data_size).  Selects the channel mode on the first request, then emits
 * MTU, RFC, EFS, EWS and FCS options as applicable.  Returns the number
 * of bytes written.
 */
3176 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3178 struct l2cap_conf_req *req = data;
3179 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3180 void *ptr = req->data;
3181 void *endptr = data + data_size;
3182 u16 size;
3184 BT_DBG("chan %p", chan);
/* Mode selection happens only on the very first request/response. */
3186 if (chan->num_conf_req || chan->num_conf_rsp)
3187 goto done;
3189 switch (chan->mode) {
3190 case L2CAP_MODE_STREAMING:
3191 case L2CAP_MODE_ERTM:
3192 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3193 break;
3195 if (__l2cap_efs_supported(chan->conn))
3196 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3198 /* fall through */
3199 default:
3200 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3201 break;
3204 done:
/* Only a non-default MTU needs to be advertised. */
3205 if (chan->imtu != L2CAP_DEFAULT_MTU)
3206 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3208 switch (chan->mode) {
3209 case L2CAP_MODE_BASIC:
3210 if (disable_ertm)
3211 break;
/* If the peer supports neither ERTM nor streaming there is no point
 * in sending an RFC option at all.
 */
3213 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3214 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3215 break;
3217 rfc.mode = L2CAP_MODE_BASIC;
3218 rfc.txwin_size = 0;
3219 rfc.max_transmit = 0;
3220 rfc.retrans_timeout = 0;
3221 rfc.monitor_timeout = 0;
3222 rfc.max_pdu_size = 0;
3224 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3225 (unsigned long) &rfc, endptr - ptr);
3226 break;
3228 case L2CAP_MODE_ERTM:
3229 rfc.mode = L2CAP_MODE_ERTM;
3230 rfc.max_transmit = chan->max_tx;
3232 __l2cap_set_ertm_timeouts(chan, &rfc);
/* Clamp the PDU size so a full frame (headers + FCS) fits the MTU. */
3234 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3235 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3236 L2CAP_FCS_SIZE);
3237 rfc.max_pdu_size = cpu_to_le16(size);
3239 l2cap_txwin_setup(chan);
3241 rfc.txwin_size = min_t(u16, chan->tx_win,
3242 L2CAP_DEFAULT_TX_WINDOW);
3244 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3245 (unsigned long) &rfc, endptr - ptr);
3247 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3248 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
/* Extended window sizes travel in a separate EWS option. */
3250 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3252 chan->tx_win, endptr - ptr);
3254 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3255 if (chan->fcs == L2CAP_FCS_NONE ||
3256 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3257 chan->fcs = L2CAP_FCS_NONE;
3258 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3259 chan->fcs, endptr - ptr);
3261 break;
3263 case L2CAP_MODE_STREAMING:
3264 l2cap_txwin_setup(chan);
3265 rfc.mode = L2CAP_MODE_STREAMING;
3266 rfc.txwin_size = 0;
3267 rfc.max_transmit = 0;
3268 rfc.retrans_timeout = 0;
3269 rfc.monitor_timeout = 0;
3271 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3272 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3273 L2CAP_FCS_SIZE);
3274 rfc.max_pdu_size = cpu_to_le16(size);
3276 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3277 (unsigned long) &rfc, endptr - ptr);
3279 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3280 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3282 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3283 if (chan->fcs == L2CAP_FCS_NONE ||
3284 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3285 chan->fcs = L2CAP_FCS_NONE;
3286 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3287 chan->fcs, endptr - ptr);
3289 break;
3292 req->dcid = cpu_to_le16(chan->dcid);
3293 req->flags = cpu_to_le16(0);
3295 return ptr - data;
3298 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3300 struct l2cap_conf_rsp *rsp = data;
3301 void *ptr = rsp->data;
3302 void *endptr = data + data_size;
3303 void *req = chan->conf_req;
3304 int len = chan->conf_len;
3305 int type, hint, olen;
3306 unsigned long val;
3307 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3308 struct l2cap_conf_efs efs;
3309 u8 remote_efs = 0;
3310 u16 mtu = L2CAP_DEFAULT_MTU;
3311 u16 result = L2CAP_CONF_SUCCESS;
3312 u16 size;
3314 BT_DBG("chan %p", chan);
3316 while (len >= L2CAP_CONF_OPT_SIZE) {
3317 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3319 hint = type & L2CAP_CONF_HINT;
3320 type &= L2CAP_CONF_MASK;
3322 switch (type) {
3323 case L2CAP_CONF_MTU:
3324 mtu = val;
3325 break;
3327 case L2CAP_CONF_FLUSH_TO:
3328 chan->flush_to = val;
3329 break;
3331 case L2CAP_CONF_QOS:
3332 break;
3334 case L2CAP_CONF_RFC:
3335 if (olen == sizeof(rfc))
3336 memcpy(&rfc, (void *) val, olen);
3337 break;
3339 case L2CAP_CONF_FCS:
3340 if (val == L2CAP_FCS_NONE)
3341 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3342 break;
3344 case L2CAP_CONF_EFS:
3345 if (olen == sizeof(efs)) {
3346 remote_efs = 1;
3347 memcpy(&efs, (void *) val, olen);
3349 break;
3351 case L2CAP_CONF_EWS:
3352 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3353 return -ECONNREFUSED;
3355 set_bit(FLAG_EXT_CTRL, &chan->flags);
3356 set_bit(CONF_EWS_RECV, &chan->conf_state);
3357 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3358 chan->remote_tx_win = val;
3359 break;
3361 default:
3362 if (hint)
3363 break;
3365 result = L2CAP_CONF_UNKNOWN;
3366 *((u8 *) ptr++) = type;
3367 break;
3371 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3372 goto done;
3374 switch (chan->mode) {
3375 case L2CAP_MODE_STREAMING:
3376 case L2CAP_MODE_ERTM:
3377 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3378 chan->mode = l2cap_select_mode(rfc.mode,
3379 chan->conn->feat_mask);
3380 break;
3383 if (remote_efs) {
3384 if (__l2cap_efs_supported(chan->conn))
3385 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3386 else
3387 return -ECONNREFUSED;
3390 if (chan->mode != rfc.mode)
3391 return -ECONNREFUSED;
3393 break;
3396 done:
3397 if (chan->mode != rfc.mode) {
3398 result = L2CAP_CONF_UNACCEPT;
3399 rfc.mode = chan->mode;
3401 if (chan->num_conf_rsp == 1)
3402 return -ECONNREFUSED;
3404 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3405 (unsigned long) &rfc, endptr - ptr);
3408 if (result == L2CAP_CONF_SUCCESS) {
3409 /* Configure output options and let the other side know
3410 * which ones we don't like. */
3412 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3413 result = L2CAP_CONF_UNACCEPT;
3414 else {
3415 chan->omtu = mtu;
3416 set_bit(CONF_MTU_DONE, &chan->conf_state);
3418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3420 if (remote_efs) {
3421 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3422 efs.stype != L2CAP_SERV_NOTRAFIC &&
3423 efs.stype != chan->local_stype) {
3425 result = L2CAP_CONF_UNACCEPT;
3427 if (chan->num_conf_req >= 1)
3428 return -ECONNREFUSED;
3430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3431 sizeof(efs),
3432 (unsigned long) &efs, endptr - ptr);
3433 } else {
3434 /* Send PENDING Conf Rsp */
3435 result = L2CAP_CONF_PENDING;
3436 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3440 switch (rfc.mode) {
3441 case L2CAP_MODE_BASIC:
3442 chan->fcs = L2CAP_FCS_NONE;
3443 set_bit(CONF_MODE_DONE, &chan->conf_state);
3444 break;
3446 case L2CAP_MODE_ERTM:
3447 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3448 chan->remote_tx_win = rfc.txwin_size;
3449 else
3450 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3452 chan->remote_max_tx = rfc.max_transmit;
3454 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3455 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3456 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3457 rfc.max_pdu_size = cpu_to_le16(size);
3458 chan->remote_mps = size;
3460 __l2cap_set_ertm_timeouts(chan, &rfc);
3462 set_bit(CONF_MODE_DONE, &chan->conf_state);
3464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3465 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3467 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3468 chan->remote_id = efs.id;
3469 chan->remote_stype = efs.stype;
3470 chan->remote_msdu = le16_to_cpu(efs.msdu);
3471 chan->remote_flush_to =
3472 le32_to_cpu(efs.flush_to);
3473 chan->remote_acc_lat =
3474 le32_to_cpu(efs.acc_lat);
3475 chan->remote_sdu_itime =
3476 le32_to_cpu(efs.sdu_itime);
3477 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3478 sizeof(efs),
3479 (unsigned long) &efs, endptr - ptr);
3481 break;
3483 case L2CAP_MODE_STREAMING:
3484 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3485 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3486 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3487 rfc.max_pdu_size = cpu_to_le16(size);
3488 chan->remote_mps = size;
3490 set_bit(CONF_MODE_DONE, &chan->conf_state);
3492 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3493 (unsigned long) &rfc, endptr - ptr);
3495 break;
3497 default:
3498 result = L2CAP_CONF_UNACCEPT;
3500 memset(&rfc, 0, sizeof(rfc));
3501 rfc.mode = chan->mode;
3504 if (result == L2CAP_CONF_SUCCESS)
3505 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3507 rsp->scid = cpu_to_le16(chan->dcid);
3508 rsp->result = cpu_to_le16(result);
3509 rsp->flags = cpu_to_le16(0);
3511 return ptr - data;
3514 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3515 void *data, size_t size, u16 *result)
3517 struct l2cap_conf_req *req = data;
3518 void *ptr = req->data;
3519 void *endptr = data + size;
3520 int type, olen;
3521 unsigned long val;
3522 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3523 struct l2cap_conf_efs efs;
3525 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3527 while (len >= L2CAP_CONF_OPT_SIZE) {
3528 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3530 switch (type) {
3531 case L2CAP_CONF_MTU:
3532 if (val < L2CAP_DEFAULT_MIN_MTU) {
3533 *result = L2CAP_CONF_UNACCEPT;
3534 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3535 } else
3536 chan->imtu = val;
3537 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3538 break;
3540 case L2CAP_CONF_FLUSH_TO:
3541 chan->flush_to = val;
3542 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3543 2, chan->flush_to, endptr - ptr);
3544 break;
3546 case L2CAP_CONF_RFC:
3547 if (olen == sizeof(rfc))
3548 memcpy(&rfc, (void *)val, olen);
3550 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3551 rfc.mode != chan->mode)
3552 return -ECONNREFUSED;
3554 chan->fcs = 0;
3556 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3557 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3558 break;
3560 case L2CAP_CONF_EWS:
3561 chan->ack_win = min_t(u16, val, chan->ack_win);
3562 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3563 chan->tx_win, endptr - ptr);
3564 break;
3566 case L2CAP_CONF_EFS:
3567 if (olen == sizeof(efs)) {
3568 memcpy(&efs, (void *)val, olen);
3570 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3571 efs.stype != L2CAP_SERV_NOTRAFIC &&
3572 efs.stype != chan->local_stype)
3573 return -ECONNREFUSED;
3575 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3576 (unsigned long) &efs, endptr - ptr);
3578 break;
3580 case L2CAP_CONF_FCS:
3581 if (*result == L2CAP_CONF_PENDING)
3582 if (val == L2CAP_FCS_NONE)
3583 set_bit(CONF_RECV_NO_FCS,
3584 &chan->conf_state);
3585 break;
3589 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3590 return -ECONNREFUSED;
3592 chan->mode = rfc.mode;
3594 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3595 switch (rfc.mode) {
3596 case L2CAP_MODE_ERTM:
3597 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3598 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3599 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3600 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3601 chan->ack_win = min_t(u16, chan->ack_win,
3602 rfc.txwin_size);
3604 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3605 chan->local_msdu = le16_to_cpu(efs.msdu);
3606 chan->local_sdu_itime =
3607 le32_to_cpu(efs.sdu_itime);
3608 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3609 chan->local_flush_to =
3610 le32_to_cpu(efs.flush_to);
3612 break;
3614 case L2CAP_MODE_STREAMING:
3615 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3619 req->dcid = cpu_to_le16(chan->dcid);
3620 req->flags = cpu_to_le16(0);
3622 return ptr - data;
3625 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3626 u16 result, u16 flags)
3628 struct l2cap_conf_rsp *rsp = data;
3629 void *ptr = rsp->data;
3631 BT_DBG("chan %p", chan);
3633 rsp->scid = cpu_to_le16(chan->dcid);
3634 rsp->result = cpu_to_le16(result);
3635 rsp->flags = cpu_to_le16(flags);
3637 return ptr - data;
3640 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3642 struct l2cap_le_conn_rsp rsp;
3643 struct l2cap_conn *conn = chan->conn;
3645 BT_DBG("chan %p", chan);
3647 rsp.dcid = cpu_to_le16(chan->scid);
3648 rsp.mtu = cpu_to_le16(chan->imtu);
3649 rsp.mps = cpu_to_le16(chan->mps);
3650 rsp.credits = cpu_to_le16(chan->rx_credits);
3651 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3653 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3654 &rsp);
3657 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3659 struct l2cap_conn_rsp rsp;
3660 struct l2cap_conn *conn = chan->conn;
3661 u8 buf[128];
3662 u8 rsp_code;
3664 rsp.scid = cpu_to_le16(chan->dcid);
3665 rsp.dcid = cpu_to_le16(chan->scid);
3666 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3667 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3669 if (chan->hs_hcon)
3670 rsp_code = L2CAP_CREATE_CHAN_RSP;
3671 else
3672 rsp_code = L2CAP_CONN_RSP;
3674 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3676 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3678 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3679 return;
3681 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3682 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3683 chan->num_conf_req++;
3686 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3688 int type, olen;
3689 unsigned long val;
3690 /* Use sane default values in case a misbehaving remote device
3691 * did not send an RFC or extended window size option.
3693 u16 txwin_ext = chan->ack_win;
3694 struct l2cap_conf_rfc rfc = {
3695 .mode = chan->mode,
3696 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3697 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3698 .max_pdu_size = cpu_to_le16(chan->imtu),
3699 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3702 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3704 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3705 return;
3707 while (len >= L2CAP_CONF_OPT_SIZE) {
3708 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3710 switch (type) {
3711 case L2CAP_CONF_RFC:
3712 if (olen == sizeof(rfc))
3713 memcpy(&rfc, (void *)val, olen);
3714 break;
3715 case L2CAP_CONF_EWS:
3716 txwin_ext = val;
3717 break;
3721 switch (rfc.mode) {
3722 case L2CAP_MODE_ERTM:
3723 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3724 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3725 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3726 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3727 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3728 else
3729 chan->ack_win = min_t(u16, chan->ack_win,
3730 rfc.txwin_size);
3731 break;
3732 case L2CAP_MODE_STREAMING:
3733 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3737 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3738 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3739 u8 *data)
3741 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3743 if (cmd_len < sizeof(*rej))
3744 return -EPROTO;
3746 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3747 return 0;
3749 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3750 cmd->ident == conn->info_ident) {
3751 cancel_delayed_work(&conn->info_timer);
3753 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3754 conn->info_ident = 0;
3756 l2cap_conn_start(conn);
3759 return 0;
/* Handle an incoming Connection Request (or the BR/EDR part of a Create
 * Channel Request). Looks up a listening channel for the PSM, allocates a
 * new child channel, and sends a response with @rsp_code. Returns the new
 * channel (or NULL) so l2cap_create_channel_req() can attach AMP state.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* chan_lock before the channel lock: same order as everywhere else
	 * in this file.
	 */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace will accept/reject later */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not yet known: answer "pending" and kick off
		 * an Information Request below.
		 */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Success path: immediately start configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
/* Signaling handler for L2CAP_CONN_REQ: notify mgmt of the (now surely
 * remote-initiated) connection once, then delegate to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* test_and_set_bit keeps mgmt_device_connected() a one-shot */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
	hci_dev_unlock(hdev);

	/* amp_id 0 == BR/EDR */
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
/* Handle a Connection Response / Create Channel Response for an outgoing
 * connect. On success move to BT_CONFIG and send the first Configure
 * Request; on a final error tear the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		/* Remote echoed our source CID back */
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* scid 0: match on the signaling identifier instead */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first success response triggers a Conf Req */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3989 static inline void set_default_fcs(struct l2cap_chan *chan)
3991 /* FCS is enabled only in ERTM or streaming mode, if one or both
3992 * sides request it.
3994 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3995 chan->fcs = L2CAP_FCS_NONE;
3996 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3997 chan->fcs = L2CAP_FCS_CRC16;
4000 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4001 u8 ident, u16 flags)
4003 struct l2cap_conn *conn = chan->conn;
4005 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4006 flags);
4008 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4009 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4011 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4012 l2cap_build_conf_rsp(chan, data,
4013 L2CAP_CONF_SUCCESS, flags), data);
4016 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4017 u16 scid, u16 dcid)
4019 struct l2cap_cmd_rej_cid rej;
4021 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4022 rej.scid = __cpu_to_le16(scid);
4023 rej.dcid = __cpu_to_le16(dcid);
4025 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request. Options may arrive split over
 * several requests (continuation flag); they are accumulated in
 * chan->conf_req and only parsed when the final fragment arrives.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: the channel is ready (or needs
	 * ERTM/streaming state machines initialised first).
	 */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We answered first: make sure our own Conf Req went out */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Configure Response. Success records the negotiated
 * RFC options; PENDING/UNACCEPT re-enter negotiation; anything else kills
 * the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* High-speed channel: response waits for the
				 * AMP logical link.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many rejections: fall through to tear-down */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Disconnection Request: acknowledge it, then remove
 * and close the channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* Remote's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Extra reference so chan survives l2cap_chan_del() until the
	 * ops->close() callback below has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle the Disconnection Response for a disconnect we initiated:
 * finish removing and closing the channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold across del so ops->close() still has a live channel */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle an incoming Information Request: answer with our feature mask,
 * our fixed-channel map, or "not supported" for any other type.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* buf = rsp header + 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* buf = rsp header + 8-byte fixed channel map */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
/* Handle an Information Response to our own request. A feature-mask
 * answer may chain into a fixed-channel request; once the exchange is
 * done, pending channel setups are resumed via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Also query the fixed channel map before starting */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
/* Handle an AMP Create Channel Request. amp_id 0 (AMP_ID_BREDR) falls
 * back to a normal BR/EDR connect; otherwise the AMP controller is
 * validated and the new channel is bound to its high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* No FCS on the high-speed transport */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4526 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4528 struct l2cap_move_chan_req req;
4529 u8 ident;
4531 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4533 ident = l2cap_get_ident(chan->conn);
4534 chan->ident = ident;
4536 req.icid = cpu_to_le16(chan->scid);
4537 req.dest_amp_id = dest_amp_id;
4539 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4540 &req);
4542 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4545 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4547 struct l2cap_move_chan_rsp rsp;
4549 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4551 rsp.icid = cpu_to_le16(chan->dcid);
4552 rsp.result = cpu_to_le16(result);
4554 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4555 sizeof(rsp), &rsp);
4558 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4560 struct l2cap_move_chan_cfm cfm;
4562 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4564 chan->ident = l2cap_get_ident(chan->conn);
4566 cfm.icid = cpu_to_le16(chan->scid);
4567 cfm.result = cpu_to_le16(result);
4569 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4570 sizeof(cfm), &cfm);
4572 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4575 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4577 struct l2cap_move_chan_cfm cfm;
4579 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4581 cfm.icid = cpu_to_le16(icid);
4582 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4584 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4585 sizeof(cfm), &cfm);
4588 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4589 u16 icid)
4591 struct l2cap_move_chan_cfm_rsp rsp;
4593 BT_DBG("icid 0x%4.4x", icid);
4595 rsp.icid = cpu_to_le16(icid);
4596 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed logical link state.
 * NOTE(review): the actual link release is still a placeholder here.
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
/* React to a failed logical-link setup: abort channel creation if the
 * channel never connected, otherwise unwind an in-progress move according
 * to our role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the move */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
/* Logical link is up for a channel being created on an AMP controller:
 * attach the link, send the deferred EFS Configure Response, and finish
 * bring-up if the inbound config is already done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the Conf Req arrived */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
/* Logical link is up during a channel move: advance the move state
 * machine depending on whether we are initiator or responder, and on
 * whether local traffic is currently busy.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
/* Call with chan locked
 *
 * Confirmation callback for logical-link setup: dispatch to the failure
 * path, the channel-creation path, or the channel-move path.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
/* Start moving @chan to the other transport: BR/EDR -> AMP requires a
 * physical link first (placeholder); AMP -> BR/EDR sends the move request
 * immediately (move_id 0 == BR/EDR).
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
/* Continue channel creation once the AMP physical link attempt resolved.
 * Outgoing channels either proceed on the AMP or fall back to BR/EDR;
 * incoming channels get their pending Create Channel Response sent now.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
4787 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4788 u8 remote_amp_id)
4790 l2cap_move_setup(chan);
4791 chan->move_id = local_amp_id;
4792 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4794 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder: answer according to logical-link availability.
 * NOTE(review): obtaining the hci_chan is still a placeholder, so today
 * this always takes the "not available" branch.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4822 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4824 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4825 u8 rsp_result;
4826 if (result == -EINVAL)
4827 rsp_result = L2CAP_MR_BAD_ID;
4828 else
4829 rsp_result = L2CAP_MR_NOT_ALLOWED;
4831 l2cap_send_move_chan_rsp(chan, rsp_result);
4834 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4835 chan->move_state = L2CAP_MOVE_STABLE;
4837 /* Restart data transmission */
4838 l2cap_ertm_send(chan);
/* Invoke with locked chan
 *
 * Physical-link confirmation: route to channel creation, move
 * cancellation, or the role-specific move continuation.
 * NOTE(review): unlocks the channel itself on the DISCONN/CLOSED path —
 * callers must account for that asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
/* Handle an incoming Move Channel Request: validate the channel and the
 * destination controller, detect move collisions, then answer with
 * success/pending/refusal and become the move responder.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels may be moved */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Process a (non-failure) Move Channel Response as initiator and drive
 * the move state machine forward; any unexpected state aborts the move.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Remote asked for more time */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
/* Handle a failed Move Channel Response (any result other than SUCCESS or
 * PEND).  On a collision the local side becomes the responder; otherwise
 * the move is cancelled and the channel reverts to its current controller.
 * In all cases an UNCONFIRMED Move Channel Confirm is sent.
 */
5063 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5064 u16 result)
5066 struct l2cap_chan *chan;
5068 chan = l2cap_get_chan_by_ident(conn, ident);
5069 if (!chan) {
5070 /* Could not locate channel, icid is best guess */
5071 l2cap_send_move_chan_cfm_icid(conn, icid);
5072 return;
5075 __clear_chan_timer(chan);
5077 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5078 if (result == L2CAP_MR_COLLISION) {
/* Lost the collision - wait for the peer's move request */
5079 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5080 } else {
5081 /* Cleanup - cancel move */
5082 chan->move_id = chan->local_amp_id;
5083 l2cap_move_done(chan);
5087 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* chan was returned locked by l2cap_get_chan_by_ident() */
5089 l2cap_chan_unlock(chan);
5092 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5093 struct l2cap_cmd_hdr *cmd,
5094 u16 cmd_len, void *data)
5096 struct l2cap_move_chan_rsp *rsp = data;
5097 u16 icid, result;
5099 if (cmd_len != sizeof(*rsp))
5100 return -EPROTO;
5102 icid = le16_to_cpu(rsp->icid);
5103 result = le16_to_cpu(rsp->result);
5105 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5107 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5108 l2cap_move_continue(conn, icid, result);
5109 else
5110 l2cap_move_fail(conn, cmd->ident, icid, result);
5112 return 0;
/* Handle an incoming Move Channel Confirm PDU.  Completes the move on the
 * responder side: on CONFIRMED the channel adopts the new controller id,
 * otherwise it reverts.  A Confirm Response is always sent, even when the
 * icid does not match any channel (required by the spec).
 * Returns 0 on success or -EPROTO if the PDU is malformed.
 */
5115 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5116 struct l2cap_cmd_hdr *cmd,
5117 u16 cmd_len, void *data)
5119 struct l2cap_move_chan_cfm *cfm = data;
5120 struct l2cap_chan *chan;
5121 u16 icid, result;
5123 if (cmd_len != sizeof(*cfm))
5124 return -EPROTO;
5126 icid = le16_to_cpu(cfm->icid);
5127 result = le16_to_cpu(cfm->result);
5129 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5131 chan = l2cap_get_chan_by_dcid(conn, icid);
5132 if (!chan) {
5133 /* Spec requires a response even if the icid was not found */
5134 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5135 return 0;
5138 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5139 if (result == L2CAP_MC_CONFIRMED) {
5140 chan->local_amp_id = chan->move_id;
/* When moving back to BR/EDR the AMP logical link is no longer needed */
5141 if (chan->local_amp_id == AMP_ID_BREDR)
5142 __release_logical_link(chan);
5143 } else {
/* Unconfirmed - stay on the current controller */
5144 chan->move_id = chan->local_amp_id;
5147 l2cap_move_done(chan);
5150 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* chan was returned locked by l2cap_get_chan_by_dcid() */
5152 l2cap_chan_unlock(chan);
5154 return 0;
/* Handle an incoming Move Channel Confirm Response PDU.  Completes the
 * move on the initiator side: the channel adopts move_id as its local
 * controller and the AMP logical link is released when returning to
 * BR/EDR.  Returns 0 on success or -EPROTO if the PDU is malformed.
 */
5157 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5158 struct l2cap_cmd_hdr *cmd,
5159 u16 cmd_len, void *data)
5161 struct l2cap_move_chan_cfm_rsp *rsp = data;
5162 struct l2cap_chan *chan;
5163 u16 icid;
5165 if (cmd_len != sizeof(*rsp))
5166 return -EPROTO;
5168 icid = le16_to_cpu(rsp->icid);
5170 BT_DBG("icid 0x%4.4x", icid);
5172 chan = l2cap_get_chan_by_scid(conn, icid);
5173 if (!chan)
5174 return 0;
5176 __clear_chan_timer(chan);
5178 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5179 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR: drop the now-unused AMP logical link */
5181 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5182 __release_logical_link(chan);
5184 l2cap_move_done(chan);
/* chan was returned locked by l2cap_get_chan_by_scid() */
5187 l2cap_chan_unlock(chan);
5189 return 0;
/* Handle an LE Connection Parameter Update Request.  Only valid when the
 * local device is the connection master.  Validates the requested
 * parameters, always sends a response (accepted/rejected), and on accept
 * applies the update and notifies the management interface.
 * Returns 0 on success, -EINVAL for wrong role, -EPROTO for a bad PDU.
 */
5192 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5193 struct l2cap_cmd_hdr *cmd,
5194 u16 cmd_len, u8 *data)
5196 struct hci_conn *hcon = conn->hcon;
5197 struct l2cap_conn_param_update_req *req;
5198 struct l2cap_conn_param_update_rsp rsp;
5199 u16 min, max, latency, to_multiplier;
5200 int err;
/* Only the master may act on a parameter update request */
5202 if (hcon->role != HCI_ROLE_MASTER)
5203 return -EINVAL;
5205 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5206 return -EPROTO;
5208 req = (struct l2cap_conn_param_update_req *) data;
5209 min = __le16_to_cpu(req->min);
5210 max = __le16_to_cpu(req->max);
5211 latency = __le16_to_cpu(req->latency);
5212 to_multiplier = __le16_to_cpu(req->to_multiplier);
5214 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5215 min, max, latency, to_multiplier);
5217 memset(&rsp, 0, sizeof(rsp));
/* Range-check the parameters before accepting them */
5219 err = hci_check_conn_params(min, max, latency, to_multiplier);
5220 if (err)
5221 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5222 else
5223 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5225 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5226 sizeof(rsp), &rsp);
5228 if (!err) {
5229 u8 store_hint;
/* Apply the new parameters and report them via mgmt */
5231 store_hint = hci_le_conn_update(hcon, min, max, latency,
5232 to_multiplier);
5233 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5234 store_hint, min, max, latency,
5235 to_multiplier);
5239 return 0;
/* Handle an LE Credit Based Connection Response.  On success the channel
 * adopts the peer's dcid/MTU/MPS/credits and becomes ready.  On a
 * security-related rejection the security level is raised and a new
 * connect request will be sent after SMP completes.  Any other result
 * tears the channel down.
 * Returns 0 on success, -EPROTO for a malformed PDU, -EBADSLT when the
 * ident or dcid does not match a channel.
 */
5242 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5243 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5244 u8 *data)
5246 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5247 struct hci_conn *hcon = conn->hcon;
5248 u16 dcid, mtu, mps, credits, result;
5249 struct l2cap_chan *chan;
5250 int err, sec_level;
5252 if (cmd_len < sizeof(*rsp))
5253 return -EPROTO;
5255 dcid = __le16_to_cpu(rsp->dcid);
5256 mtu = __le16_to_cpu(rsp->mtu);
5257 mps = __le16_to_cpu(rsp->mps);
5258 credits = __le16_to_cpu(rsp->credits);
5259 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum LE CoC MTU/MPS; dcid must be in the LE dynamic range */
5261 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5262 dcid < L2CAP_CID_DYN_START ||
5263 dcid > L2CAP_CID_LE_DYN_END))
5264 return -EPROTO;
5266 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5267 dcid, mtu, mps, credits, result);
5269 mutex_lock(&conn->chan_lock);
/* Match the response to our pending request by ident */
5271 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5272 if (!chan) {
5273 err = -EBADSLT;
5274 goto unlock;
5277 err = 0;
5279 l2cap_chan_lock(chan);
5281 switch (result) {
5282 case L2CAP_CR_SUCCESS:
/* Reject a dcid that is already in use by another channel */
5283 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5284 err = -EBADSLT;
5285 break;
5288 chan->ident = 0;
5289 chan->dcid = dcid;
5290 chan->omtu = mtu;
5291 chan->remote_mps = mps;
5292 chan->tx_credits = credits;
5293 l2cap_chan_ready(chan);
5294 break;
5296 case L2CAP_CR_AUTHENTICATION:
5297 case L2CAP_CR_ENCRYPTION:
5298 /* If we already have MITM protection we can't do
5299 * anything.
5301 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5302 l2cap_chan_del(chan, ECONNREFUSED);
5303 break;
/* Raise security one level and retry once SMP succeeds */
5306 sec_level = hcon->sec_level + 1;
5307 if (chan->sec_level < sec_level)
5308 chan->sec_level = sec_level;
5310 /* We'll need to send a new Connect Request */
5311 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5313 smp_conn_security(hcon, chan->sec_level);
5314 break;
5316 default:
5317 l2cap_chan_del(chan, ECONNREFUSED);
5318 break;
5321 l2cap_chan_unlock(chan);
5323 unlock:
5324 mutex_unlock(&conn->chan_lock);
5326 return err;
/* Dispatch a single BR/EDR signaling command to its handler.  Echo
 * requests are answered inline; unknown opcodes return -EINVAL so the
 * caller sends a Command Reject.  Handlers whose return value is ignored
 * never need a reject packet.
 */
5329 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5330 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5331 u8 *data)
5333 int err = 0;
5335 switch (cmd->code) {
5336 case L2CAP_COMMAND_REJ:
5337 l2cap_command_rej(conn, cmd, cmd_len, data);
5338 break;
5340 case L2CAP_CONN_REQ:
5341 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5342 break;
5344 case L2CAP_CONN_RSP:
5345 case L2CAP_CREATE_CHAN_RSP:
5346 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5347 break;
5349 case L2CAP_CONF_REQ:
5350 err = l2cap_config_req(conn, cmd, cmd_len, data);
5351 break;
5353 case L2CAP_CONF_RSP:
5354 l2cap_config_rsp(conn, cmd, cmd_len, data);
5355 break;
5357 case L2CAP_DISCONN_REQ:
5358 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5359 break;
5361 case L2CAP_DISCONN_RSP:
5362 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5363 break;
5365 case L2CAP_ECHO_REQ:
/* Echo the payload straight back to the peer */
5366 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5367 break;
5369 case L2CAP_ECHO_RSP:
5370 break;
5372 case L2CAP_INFO_REQ:
5373 err = l2cap_information_req(conn, cmd, cmd_len, data);
5374 break;
5376 case L2CAP_INFO_RSP:
5377 l2cap_information_rsp(conn, cmd, cmd_len, data);
5378 break;
5380 case L2CAP_CREATE_CHAN_REQ:
5381 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5382 break;
5384 case L2CAP_MOVE_CHAN_REQ:
5385 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5386 break;
5388 case L2CAP_MOVE_CHAN_RSP:
5389 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5390 break;
5392 case L2CAP_MOVE_CHAN_CFM:
5393 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5394 break;
5396 case L2CAP_MOVE_CHAN_CFM_RSP:
5397 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5398 break;
5400 default:
5401 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5402 err = -EINVAL;
5403 break;
5406 return err;
/* Handle an LE Credit Based Connection Request.  Looks up a listening
 * channel for the PSM, validates security and the requested scid, creates
 * a new channel, and sends an LE Connect Response - unless setup is
 * deferred to userspace, in which case the response is sent later.
 * Returns 0 on success or -EPROTO for a malformed PDU.
 */
5409 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5410 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5411 u8 *data)
5413 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5414 struct l2cap_le_conn_rsp rsp;
5415 struct l2cap_chan *chan, *pchan;
5416 u16 dcid, scid, credits, mtu, mps;
5417 __le16 psm;
5418 u8 result;
5420 if (cmd_len != sizeof(*req))
5421 return -EPROTO;
5423 scid = __le16_to_cpu(req->scid);
5424 mtu = __le16_to_cpu(req->mtu);
5425 mps = __le16_to_cpu(req->mps);
5426 psm = req->psm;
5427 dcid = 0;
5428 credits = 0;
/* 23 is the minimum MTU/MPS allowed for LE CoC */
5430 if (mtu < 23 || mps < 23)
5431 return -EPROTO;
5433 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5434 scid, mtu, mps);
5436 /* Check if we have socket listening on psm */
5437 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5438 &conn->hcon->dst, LE_LINK);
5439 if (!pchan) {
5440 result = L2CAP_CR_BAD_PSM;
5441 chan = NULL;
5442 goto response;
5445 mutex_lock(&conn->chan_lock);
5446 l2cap_chan_lock(pchan);
/* The link must already satisfy the listener's security level */
5448 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5449 SMP_ALLOW_STK)) {
5450 result = L2CAP_CR_AUTHENTICATION;
5451 chan = NULL;
5452 goto response_unlock;
5455 /* Check for valid dynamic CID range */
5456 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5457 result = L2CAP_CR_INVALID_SCID;
5458 chan = NULL;
5459 goto response_unlock;
5462 /* Check if we already have channel with that dcid */
5463 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5464 result = L2CAP_CR_SCID_IN_USE;
5465 chan = NULL;
5466 goto response_unlock;
/* Create the child channel from the listening (parent) channel */
5469 chan = pchan->ops->new_connection(pchan);
5470 if (!chan) {
5471 result = L2CAP_CR_NO_MEM;
5472 goto response_unlock;
5475 l2cap_le_flowctl_init(chan);
5477 bacpy(&chan->src, &conn->hcon->src);
5478 bacpy(&chan->dst, &conn->hcon->dst);
5479 chan->src_type = bdaddr_src_type(conn->hcon);
5480 chan->dst_type = bdaddr_dst_type(conn->hcon);
5481 chan->psm = psm;
5482 chan->dcid = scid;
5483 chan->omtu = mtu;
5484 chan->remote_mps = mps;
5485 chan->tx_credits = __le16_to_cpu(req->credits);
5487 __l2cap_chan_add(conn, chan);
5488 dcid = chan->scid;
5489 credits = chan->rx_credits;
5491 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5493 chan->ident = cmd->ident;
5495 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5496 l2cap_state_change(chan, BT_CONNECT2);
5497 /* The following result value is actually not defined
5498 * for LE CoC but we use it to let the function know
5499 * that it should bail out after doing its cleanup
5500 * instead of sending a response.
5502 result = L2CAP_CR_PEND;
5503 chan->ops->defer(chan);
5504 } else {
5505 l2cap_chan_ready(chan);
5506 result = L2CAP_CR_SUCCESS;
5509 response_unlock:
5510 l2cap_chan_unlock(pchan);
5511 mutex_unlock(&conn->chan_lock);
5512 l2cap_chan_put(pchan);
/* Deferred setup: the response is sent later by userspace accept */
5514 if (result == L2CAP_CR_PEND)
5515 return 0;
5517 response:
5518 if (chan) {
5519 rsp.mtu = cpu_to_le16(chan->imtu);
5520 rsp.mps = cpu_to_le16(chan->mps);
5521 } else {
5522 rsp.mtu = 0;
5523 rsp.mps = 0;
5526 rsp.dcid = cpu_to_le16(dcid);
5527 rsp.credits = cpu_to_le16(credits);
5528 rsp.result = cpu_to_le16(result);
5530 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5532 return 0;
/* Handle an LE Flow Control Credit PDU.  Adds the peer's credits to the
 * channel's tx budget (disconnecting on overflow past
 * LE_FLOWCTL_MAX_CREDITS) and flushes queued outgoing frames that the new
 * credits allow.  Returns 0, -EPROTO for a bad PDU, or -EBADSLT for an
 * unknown cid.
 */
5535 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5536 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5537 u8 *data)
5539 struct l2cap_le_credits *pkt;
5540 struct l2cap_chan *chan;
5541 u16 cid, credits, max_credits;
5543 if (cmd_len != sizeof(*pkt))
5544 return -EPROTO;
5546 pkt = (struct l2cap_le_credits *) data;
5547 cid = __le16_to_cpu(pkt->cid);
5548 credits = __le16_to_cpu(pkt->credits);
5550 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5552 chan = l2cap_get_chan_by_dcid(conn, cid);
5553 if (!chan)
5554 return -EBADSLT;
/* Credits must never push tx_credits past the protocol maximum */
5556 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5557 if (credits > max_credits) {
5558 BT_ERR("LE credits overflow");
5559 l2cap_send_disconn_req(chan, ECONNRESET);
5560 l2cap_chan_unlock(chan);
5562 /* Return 0 so that we don't trigger an unnecessary
5563 * command reject packet.
5565 return 0;
5568 chan->tx_credits += credits;
/* Send as many queued frames as the new credits permit */
5570 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5571 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5572 chan->tx_credits--;
/* Credits left over - let the owner resume sending */
5575 if (chan->tx_credits)
5576 chan->ops->resume(chan);
5578 l2cap_chan_unlock(chan);
5580 return 0;
5583 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5584 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5585 u8 *data)
5587 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5588 struct l2cap_chan *chan;
5590 if (cmd_len < sizeof(*rej))
5591 return -EPROTO;
5593 mutex_lock(&conn->chan_lock);
5595 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5596 if (!chan)
5597 goto done;
5599 l2cap_chan_lock(chan);
5600 l2cap_chan_del(chan, ECONNREFUSED);
5601 l2cap_chan_unlock(chan);
5603 done:
5604 mutex_unlock(&conn->chan_lock);
5605 return 0;
/* Dispatch a single LE signaling command to its handler.  Unknown opcodes
 * return -EINVAL so the caller sends a Command Reject; a solicited
 * CONN_PARAM_UPDATE_RSP needs no processing.
 */
5608 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5609 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5610 u8 *data)
5612 int err = 0;
5614 switch (cmd->code) {
5615 case L2CAP_COMMAND_REJ:
5616 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5617 break;
5619 case L2CAP_CONN_PARAM_UPDATE_REQ:
5620 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5621 break;
5623 case L2CAP_CONN_PARAM_UPDATE_RSP:
5624 break;
5626 case L2CAP_LE_CONN_RSP:
5627 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5628 break;
5630 case L2CAP_LE_CONN_REQ:
5631 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5632 break;
5634 case L2CAP_LE_CREDITS:
5635 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5636 break;
5638 case L2CAP_DISCONN_REQ:
5639 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5640 break;
5642 case L2CAP_DISCONN_RSP:
5643 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5644 break;
5646 default:
5647 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5648 err = -EINVAL;
5649 break;
5652 return err;
/* Process an skb received on the LE signaling channel.  LE carries exactly
 * one command per PDU: validate the header, dispatch it, and send a
 * Command Reject when the handler fails.  The skb is always consumed.
 */
5655 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5656 struct sk_buff *skb)
5658 struct hci_conn *hcon = conn->hcon;
5659 struct l2cap_cmd_hdr *cmd;
5660 u16 len;
5661 int err;
5663 if (hcon->type != LE_LINK)
5664 goto drop;
5666 if (skb->len < L2CAP_CMD_HDR_SIZE)
5667 goto drop;
5669 cmd = (void *) skb->data;
5670 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5672 len = le16_to_cpu(cmd->len);
5674 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The declared length must match the payload exactly; ident 0 is invalid */
5676 if (len != skb->len || !cmd->ident) {
5677 BT_DBG("corrupted command");
5678 goto drop;
5681 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5682 if (err) {
5683 struct l2cap_cmd_rej_unk rej;
5685 BT_ERR("Wrong link type (%d)", err);
5687 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5688 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5689 sizeof(rej), &rej);
5692 drop:
5693 kfree_skb(skb);
/* Process an skb received on the BR/EDR signaling channel.  Unlike LE, a
 * single PDU may carry multiple commands; iterate over them, dispatch
 * each, and send a Command Reject for handler failures.  The skb is
 * always consumed.
 */
5696 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5697 struct sk_buff *skb)
5699 struct hci_conn *hcon = conn->hcon;
5700 u8 *data = skb->data;
5701 int len = skb->len;
5702 struct l2cap_cmd_hdr cmd;
5703 int err;
/* Mirror all signaling traffic to raw sockets first */
5705 l2cap_raw_recv(conn, skb);
5707 if (hcon->type != ACL_LINK)
5708 goto drop;
/* Walk the PDU, one command header + payload at a time */
5710 while (len >= L2CAP_CMD_HDR_SIZE) {
5711 u16 cmd_len;
5712 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5713 data += L2CAP_CMD_HDR_SIZE;
5714 len -= L2CAP_CMD_HDR_SIZE;
5716 cmd_len = le16_to_cpu(cmd.len);
5718 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5719 cmd.ident);
/* A command must not claim more payload than remains; ident 0 is invalid */
5721 if (cmd_len > len || !cmd.ident) {
5722 BT_DBG("corrupted command");
5723 break;
5726 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5727 if (err) {
5728 struct l2cap_cmd_rej_unk rej;
5730 BT_ERR("Wrong link type (%d)", err);
5732 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5733 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5734 sizeof(rej), &rej);
5737 data += cmd_len;
5738 len -= cmd_len;
5741 drop:
5742 kfree_skb(skb);
/* Verify the CRC16 FCS trailer on a received ERTM/streaming frame.  The
 * FCS covers the L2CAP header (which sits just before skb->data after the
 * earlier pulls) plus the payload.  The trailer is trimmed from the skb.
 * Returns 0 on success or -EBADMSG on a checksum mismatch; channels
 * without FCS always pass.
 */
5745 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5747 u16 our_fcs, rcv_fcs;
5748 int hdr_size;
/* Extended control fields use a larger header, which the CRC covers */
5750 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5751 hdr_size = L2CAP_EXT_HDR_SIZE;
5752 else
5753 hdr_size = L2CAP_ENH_HDR_SIZE;
5755 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the 2-byte FCS, then read it from just past the new tail */
5756 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5757 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5758 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5760 if (our_fcs != rcv_fcs)
5761 return -EBADMSG;
5763 return 0;
/* Answer a poll (P=1) from the peer with an F=1 frame: RNR if we are
 * locally busy, otherwise pending I-frames, falling back to an RR s-frame
 * if no frame carried the F bit.
 */
5766 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5768 struct l2cap_ctrl control;
5770 BT_DBG("chan %p", chan);
5772 memset(&control, 0, sizeof(control));
5773 control.sframe = 1;
5774 control.final = 1;
5775 control.reqseq = chan->buffer_seq;
/* Mark that the next outgoing frame must carry the F bit */
5776 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5778 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5779 control.super = L2CAP_SUPER_RNR;
5780 l2cap_send_sframe(chan, &control);
/* Peer just left busy state - restart retransmission timing */
5783 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5784 chan->unacked_frames > 0)
5785 __set_retrans_timer(chan);
5787 /* Send pending iframes */
5788 l2cap_ertm_send(chan);
5790 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5791 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5792 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5793 * send it now.
5795 control.super = L2CAP_SUPER_RR;
5796 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the list tail through
 * *last_frag to avoid walking the list, and update skb's aggregate
 * accounting.  *last_frag must point at the current tail (or at skb
 * itself when the list is empty).
 */
5800 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5801 struct sk_buff **last_frag)
5803 /* skb->len reflects data in skb as well as all fragments
5804 * skb->data_len reflects only data in fragments
5806 if (!skb_has_frag_list(skb))
5807 skb_shinfo(skb)->frag_list = new_frag;
5809 new_frag->next = NULL;
/* Link onto the tail and advance the cached tail pointer */
5811 (*last_frag)->next = new_frag;
5812 *last_frag = new_frag;
5814 skb->len += new_frag->len;
5815 skb->data_len += new_frag->len;
5816 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from segmented I-frames according to the SAR bits in
 * control.  Unsegmented frames are delivered directly; START begins a new
 * chan->sdu, CONTINUE/END append fragments, and END delivers the complete
 * SDU.  On any error the skb and partial SDU are freed.
 * Returns 0 on success, -EINVAL for SAR sequence violations, -EMSGSIZE
 * when the announced SDU exceeds our MTU, or the recv callback's error.
 */
5819 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5820 struct l2cap_ctrl *control)
5822 int err = -EINVAL;
5824 switch (control->sar) {
5825 case L2CAP_SAR_UNSEGMENTED:
/* An unsegmented frame during reassembly is a protocol error */
5826 if (chan->sdu)
5827 break;
5829 err = chan->ops->recv(chan, skb);
5830 break;
5832 case L2CAP_SAR_START:
5833 if (chan->sdu)
5834 break;
/* First fragment carries the total SDU length prefix */
5836 chan->sdu_len = get_unaligned_le16(skb->data);
5837 skb_pull(skb, L2CAP_SDULEN_SIZE);
5839 if (chan->sdu_len > chan->imtu) {
5840 err = -EMSGSIZE;
5841 break;
/* A START fragment must not already contain the whole SDU */
5844 if (skb->len >= chan->sdu_len)
5845 break;
5847 chan->sdu = skb;
5848 chan->sdu_last_frag = skb;
/* skb now owned by chan->sdu - prevent the error path from freeing it */
5850 skb = NULL;
5851 err = 0;
5852 break;
5854 case L2CAP_SAR_CONTINUE:
5855 if (!chan->sdu)
5856 break;
5858 append_skb_frag(chan->sdu, skb,
5859 &chan->sdu_last_frag);
5860 skb = NULL;
/* A CONTINUE must leave the SDU still incomplete */
5862 if (chan->sdu->len >= chan->sdu_len)
5863 break;
5865 err = 0;
5866 break;
5868 case L2CAP_SAR_END:
5869 if (!chan->sdu)
5870 break;
5872 append_skb_frag(chan->sdu, skb,
5873 &chan->sdu_last_frag);
5874 skb = NULL;
/* The END fragment must complete the SDU exactly */
5876 if (chan->sdu->len != chan->sdu_len)
5877 break;
5879 err = chan->ops->recv(chan, chan->sdu);
5881 if (!err) {
5882 /* Reassembly complete */
5883 chan->sdu = NULL;
5884 chan->sdu_last_frag = NULL;
5885 chan->sdu_len = 0;
5887 break;
/* Error: free the current skb (if still ours) and any partial SDU */
5890 if (err) {
5891 kfree_skb(skb);
5892 kfree_skb(chan->sdu);
5893 chan->sdu = NULL;
5894 chan->sdu_last_frag = NULL;
5895 chan->sdu_len = 0;
5898 return err;
/* Resegment outgoing data after an AMP channel move.
 *
 * Not implemented yet; currently reports success unconditionally.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder - no resegmentation is performed */
	int err = 0;

	return err;
}
5907 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5909 u8 event;
5911 if (chan->mode != L2CAP_MODE_ERTM)
5912 return;
5914 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5915 l2cap_tx(chan, NULL, NULL, event);
/* Deliver buffered out-of-order I-frames from the SREJ queue in sequence
 * order, stopping at the first gap or when local busy is raised.  Once the
 * queue drains, the channel returns to the normal RECV state and acks.
 * Returns 0 or the first reassembly error.
 */
5918 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5920 int err = 0;
5921 /* Pass sequential frames to l2cap_reassemble_sdu()
5922 * until a gap is encountered.
5925 BT_DBG("chan %p", chan);
5927 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5928 struct sk_buff *skb;
5929 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5930 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5932 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
/* Gap found - the missing frame has not been retransmitted yet */
5934 if (!skb)
5935 break;
5937 skb_unlink(skb, &chan->srej_q);
5938 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5939 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
5940 if (err)
5941 break;
/* All buffered frames delivered - leave the SREJ_SENT state */
5944 if (skb_queue_empty(&chan->srej_q)) {
5945 chan->rx_state = L2CAP_RX_STATE_RECV;
5946 l2cap_send_ack(chan);
5949 return err;
/* Handle a received SREJ s-frame: retransmit the single requested I-frame.
 * Disconnects on an invalid reqseq or when the frame has exhausted
 * max_tx retries.  The P/F bits steer the CONN_SREJ_ACT bookkeeping that
 * suppresses duplicate retransmissions in the WAIT_F state.
 */
5952 static void l2cap_handle_srej(struct l2cap_chan *chan,
5953 struct l2cap_ctrl *control)
5955 struct sk_buff *skb;
5957 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq requests a frame we never sent */
5959 if (control->reqseq == chan->next_tx_seq) {
5960 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5961 l2cap_send_disconn_req(chan, ECONNRESET);
5962 return;
5965 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5967 if (skb == NULL) {
5968 BT_DBG("Seq %d not available for retransmission",
5969 control->reqseq);
5970 return;
/* max_tx of 0 means unlimited retries */
5973 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
5974 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5975 l2cap_send_disconn_req(chan, ECONNRESET);
5976 return;
5979 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5981 if (control->poll) {
5982 l2cap_pass_to_tx(chan, control);
/* Poll set: answer with F bit and retransmit immediately */
5984 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5985 l2cap_retransmit(chan, control);
5986 l2cap_ertm_send(chan);
5988 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5989 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5990 chan->srej_save_reqseq = control->reqseq;
5992 } else {
5993 l2cap_pass_to_tx_fbit(chan, control);
5995 if (control->final) {
/* Skip the retransmit if this SREJ was already acted on */
5996 if (chan->srej_save_reqseq != control->reqseq ||
5997 !test_and_clear_bit(CONN_SREJ_ACT,
5998 &chan->conn_state))
5999 l2cap_retransmit(chan, control);
6000 } else {
6001 l2cap_retransmit(chan, control);
6002 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6003 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6004 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ s-frame: retransmit all unacked I-frames starting
 * at reqseq.  Disconnects on an invalid reqseq or exhausted retries.  The
 * CONN_REJ_ACT flag suppresses a duplicate retransmit-all when the final
 * REJ arrives after a poll.
 */
6010 static void l2cap_handle_rej(struct l2cap_chan *chan,
6011 struct l2cap_ctrl *control)
6013 struct sk_buff *skb;
6015 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq rejects a frame we never sent */
6017 if (control->reqseq == chan->next_tx_seq) {
6018 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6019 l2cap_send_disconn_req(chan, ECONNRESET);
6020 return;
6023 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6025 if (chan->max_tx && skb &&
6026 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6027 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6028 l2cap_send_disconn_req(chan, ECONNRESET);
6029 return;
6032 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6034 l2cap_pass_to_tx(chan, control);
6036 if (control->final) {
/* Only retransmit if the earlier REJ was not already handled */
6037 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6038 l2cap_retransmit_all(chan, control);
6039 } else {
6040 l2cap_retransmit_all(chan, control);
6041 l2cap_ertm_send(chan);
6042 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6043 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window and
 * any outstanding SREJ state.  Returns one of the L2CAP_TXSEQ_* codes that
 * drive the rx state machines: EXPECTED, UNEXPECTED (gap -> send SREJ),
 * DUPLICATE, the *_SREJ variants while in SREJ_SENT, or INVALID /
 * INVALID_IGNORE for frames outside the window.
 */
6047 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6049 BT_DBG("chan %p, txseq %d", chan, txseq);
6051 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6052 chan->expected_tx_seq);
/* While SREJs are outstanding, classify against the SREJ bookkeeping */
6054 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6055 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6056 chan->tx_win) {
6057 /* See notes below regarding "double poll" and
6058 * invalid packets.
6060 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6061 BT_DBG("Invalid/Ignore - after SREJ");
6062 return L2CAP_TXSEQ_INVALID_IGNORE;
6063 } else {
6064 BT_DBG("Invalid - in window after SREJ sent");
6065 return L2CAP_TXSEQ_INVALID;
6069 if (chan->srej_list.head == txseq) {
6070 BT_DBG("Expected SREJ");
6071 return L2CAP_TXSEQ_EXPECTED_SREJ;
6074 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6075 BT_DBG("Duplicate SREJ - txseq already stored");
6076 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6079 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6080 BT_DBG("Unexpected SREJ - not requested");
6081 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6085 if (chan->expected_tx_seq == txseq) {
6086 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6087 chan->tx_win) {
6088 BT_DBG("Invalid - txseq outside tx window");
6089 return L2CAP_TXSEQ_INVALID;
6090 } else {
6091 BT_DBG("Expected");
6092 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (mod window) is a retransmitted dup */
6096 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6097 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6098 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6099 return L2CAP_TXSEQ_DUPLICATE;
6102 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6103 /* A source of invalid packets is a "double poll" condition,
6104 * where delays cause us to send multiple poll packets. If
6105 * the remote stack receives and processes both polls,
6106 * sequence numbers can wrap around in such a way that a
6107 * resent frame has a sequence number that looks like new data
6108 * with a sequence gap. This would trigger an erroneous SREJ
6109 * request.
6111 * Fortunately, this is impossible with a tx window that's
6112 * less than half of the maximum sequence number, which allows
6113 * invalid frames to be safely ignored.
6115 * With tx window sizes greater than half of the tx window
6116 * maximum, the frame is invalid and cannot be ignored. This
6117 * causes a disconnect.
6120 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6121 BT_DBG("Invalid/Ignore - txseq outside tx window");
6122 return L2CAP_TXSEQ_INVALID_IGNORE;
6123 } else {
6124 BT_DBG("Invalid - txseq outside tx window");
6125 return L2CAP_TXSEQ_INVALID;
6127 } else {
6128 BT_DBG("Unexpected - txseq indicates missing frames");
6129 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receiver state machine, normal RECV state.  Processes incoming
 * I-frames (deliver, queue + SREJ on a gap, drop duplicates) and s-frames
 * (RR/RNR/REJ/SREJ).  The skb is consumed here unless it was queued or
 * handed to reassembly (tracked by skb_in_use).
 * Returns 0 or a reassembly error.
 */
6133 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6134 struct l2cap_ctrl *control,
6135 struct sk_buff *skb, u8 event)
6137 int err = 0;
6138 bool skb_in_use = false;
6140 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6141 event);
6143 switch (event) {
6144 case L2CAP_EV_RECV_IFRAME:
6145 switch (l2cap_classify_txseq(chan, control->txseq)) {
6146 case L2CAP_TXSEQ_EXPECTED:
6147 l2cap_pass_to_tx(chan, control);
/* While locally busy, expected frames are simply dropped */
6149 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6150 BT_DBG("Busy, discarding expected seq %d",
6151 control->txseq);
6152 break;
6155 chan->expected_tx_seq = __next_seq(chan,
6156 control->txseq);
6158 chan->buffer_seq = chan->expected_tx_seq;
6159 skb_in_use = true;
6161 err = l2cap_reassemble_sdu(chan, skb, control);
6162 if (err)
6163 break;
6165 if (control->final) {
6166 if (!test_and_clear_bit(CONN_REJ_ACT,
6167 &chan->conn_state)) {
6168 control->final = 0;
6169 l2cap_retransmit_all(chan, control);
6170 l2cap_ertm_send(chan);
6174 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6175 l2cap_send_ack(chan);
6176 break;
6177 case L2CAP_TXSEQ_UNEXPECTED:
6178 l2cap_pass_to_tx(chan, control);
6180 /* Can't issue SREJ frames in the local busy state.
6181 * Drop this frame, it will be seen as missing
6182 * when local busy is exited.
6184 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6185 BT_DBG("Busy, discarding unexpected seq %d",
6186 control->txseq);
6187 break;
6190 /* There was a gap in the sequence, so an SREJ
6191 * must be sent for each missing frame. The
6192 * current frame is stored for later use.
6194 skb_queue_tail(&chan->srej_q, skb);
6195 skb_in_use = true;
6196 BT_DBG("Queued %p (queue len %d)", skb,
6197 skb_queue_len(&chan->srej_q));
6199 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6200 l2cap_seq_list_clear(&chan->srej_list);
6201 l2cap_send_srej(chan, control->txseq);
6203 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6204 break;
6205 case L2CAP_TXSEQ_DUPLICATE:
/* Duplicate data is dropped, but its ack info still matters */
6206 l2cap_pass_to_tx(chan, control);
6207 break;
6208 case L2CAP_TXSEQ_INVALID_IGNORE:
6209 break;
6210 case L2CAP_TXSEQ_INVALID:
6211 default:
6212 l2cap_send_disconn_req(chan, ECONNRESET);
6213 break;
6215 break;
6216 case L2CAP_EV_RECV_RR:
6217 l2cap_pass_to_tx(chan, control);
6218 if (control->final) {
6219 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't retransmit during an AMP move - queues are in flux */
6221 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6222 !__chan_is_moving(chan)) {
6223 control->final = 0;
6224 l2cap_retransmit_all(chan, control);
6227 l2cap_ertm_send(chan);
6228 } else if (control->poll) {
6229 l2cap_send_i_or_rr_or_rnr(chan);
6230 } else {
6231 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6232 &chan->conn_state) &&
6233 chan->unacked_frames)
6234 __set_retrans_timer(chan);
6236 l2cap_ertm_send(chan);
6238 break;
6239 case L2CAP_EV_RECV_RNR:
6240 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6241 l2cap_pass_to_tx(chan, control);
6242 if (control && control->poll) {
6243 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6244 l2cap_send_rr_or_rnr(chan, 0);
/* Peer is busy - stop retransmitting until it recovers */
6246 __clear_retrans_timer(chan);
6247 l2cap_seq_list_clear(&chan->retrans_list);
6248 break;
6249 case L2CAP_EV_RECV_REJ:
6250 l2cap_handle_rej(chan, control);
6251 break;
6252 case L2CAP_EV_RECV_SREJ:
6253 l2cap_handle_srej(chan, control);
6254 break;
6255 default:
6256 break;
/* Free the skb unless it was queued or passed to reassembly */
6259 if (skb && !skb_in_use) {
6260 BT_DBG("Freeing %p", skb);
6261 kfree_skb(skb);
6264 return err;
/* ERTM receive state machine, SREJ_SENT state: one or more SREJ frames
 * are outstanding, so in-sequence and retransmitted I-frames are parked
 * on chan->srej_q until the sequence gap is filled, at which point the
 * queued frames are replayed through reassembly.
 *
 * @chan:    channel the frame arrived on
 * @control: unpacked control field of the received frame
 * @skb:     received frame; may be NULL for S-frame events
 * @event:   L2CAP_EV_* receive event
 *
 * Returns 0 or a negative errno from reassembly.  If @skb was not
 * queued (skb_in_use stays false) it is freed before returning.
 */
6267 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6268 struct l2cap_ctrl *control,
6269 struct sk_buff *skb, u8 event)
6271 int err = 0;
6272 u16 txseq = control->txseq;
6273 bool skb_in_use = false;
6275 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6276 event);
6278 switch (event) {
6279 case L2CAP_EV_RECV_IFRAME:
6280 switch (l2cap_classify_txseq(chan, txseq)) {
6281 case L2CAP_TXSEQ_EXPECTED:
6282 /* Keep frame for reassembly later */
6283 l2cap_pass_to_tx(chan, control);
6284 skb_queue_tail(&chan->srej_q, skb);
6285 skb_in_use = true;
6286 BT_DBG("Queued %p (queue len %d)", skb,
6287 skb_queue_len(&chan->srej_q));
6289 chan->expected_tx_seq = __next_seq(chan, txseq);
6290 break;
6291 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission we were waiting for next;
 * drop it from the SREJ list and try to flush srej_q.
 */
6292 l2cap_seq_list_pop(&chan->srej_list);
6294 l2cap_pass_to_tx(chan, control);
6295 skb_queue_tail(&chan->srej_q, skb);
6296 skb_in_use = true;
6297 BT_DBG("Queued %p (queue len %d)", skb,
6298 skb_queue_len(&chan->srej_q));
6300 err = l2cap_rx_queued_iframes(chan);
6301 if (err)
6302 break;
6304 break;
6305 case L2CAP_TXSEQ_UNEXPECTED:
6306 /* Got a frame that can't be reassembled yet.
6307 * Save it for later, and send SREJs to cover
6308 * the missing frames.
 */
6310 skb_queue_tail(&chan->srej_q, skb);
6311 skb_in_use = true;
6312 BT_DBG("Queued %p (queue len %d)", skb,
6313 skb_queue_len(&chan->srej_q));
6315 l2cap_pass_to_tx(chan, control);
6316 l2cap_send_srej(chan, control->txseq);
6317 break;
6318 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6319 /* This frame was requested with an SREJ, but
6320 * some expected retransmitted frames are
6321 * missing. Request retransmission of missing
6322 * SREJ'd frames.
 */
6324 skb_queue_tail(&chan->srej_q, skb);
6325 skb_in_use = true;
6326 BT_DBG("Queued %p (queue len %d)", skb,
6327 skb_queue_len(&chan->srej_q));
6329 l2cap_pass_to_tx(chan, control);
6330 l2cap_send_srej_list(chan, control->txseq);
6331 break;
6332 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6333 /* We've already queued this frame. Drop this copy. */
6334 l2cap_pass_to_tx(chan, control);
6335 break;
6336 case L2CAP_TXSEQ_DUPLICATE:
6337 /* Expecting a later sequence number, so this frame
6338 * was already received. Ignore it completely.
 */
6340 break;
6341 case L2CAP_TXSEQ_INVALID_IGNORE:
6342 break;
6343 case L2CAP_TXSEQ_INVALID:
6344 default:
/* Protocol violation: tear the channel down */
6345 l2cap_send_disconn_req(chan, ECONNRESET);
6346 break;
6348 break;
6349 case L2CAP_EV_RECV_RR:
6350 l2cap_pass_to_tx(chan, control);
6351 if (control->final) {
6352 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6354 if (!test_and_clear_bit(CONN_REJ_ACT,
6355 &chan->conn_state)) {
6356 control->final = 0;
6357 l2cap_retransmit_all(chan, control);
6360 l2cap_ertm_send(chan);
6361 } else if (control->poll) {
6362 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6363 &chan->conn_state) &&
6364 chan->unacked_frames) {
6365 __set_retrans_timer(chan);
/* Poll bit set: answer with F=1 on the SREJ tail */
6368 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6369 l2cap_send_srej_tail(chan);
6370 } else {
6371 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6372 &chan->conn_state) &&
6373 chan->unacked_frames)
6374 __set_retrans_timer(chan);
6376 l2cap_send_ack(chan);
6378 break;
6379 case L2CAP_EV_RECV_RNR:
6380 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6381 l2cap_pass_to_tx(chan, control);
6382 if (control->poll) {
6383 l2cap_send_srej_tail(chan);
6384 } else {
6385 struct l2cap_ctrl rr_control;
6386 memset(&rr_control, 0, sizeof(rr_control));
6387 rr_control.sframe = 1;
6388 rr_control.super = L2CAP_SUPER_RR;
6389 rr_control.reqseq = chan->buffer_seq;
6390 l2cap_send_sframe(chan, &rr_control);
6393 break;
6394 case L2CAP_EV_RECV_REJ:
6395 l2cap_handle_rej(chan, control);
6396 break;
6397 case L2CAP_EV_RECV_SREJ:
6398 l2cap_handle_srej(chan, control);
6399 break;
/* Frame was not stashed on srej_q; release it here */
6402 if (skb && !skb_in_use) {
6403 BT_DBG("Freeing %p", skb);
6404 kfree_skb(skb);
6407 return err;
/* Complete an AMP channel move: return the RX state machine to RECV,
 * adopt the MTU of the controller now carrying the channel (block MTU
 * for an AMP high-speed link, ACL MTU otherwise) and re-segment any
 * pending TX data for the new MTU.  Returns l2cap_resegment()'s result.
 */
6410 static int l2cap_finish_move(struct l2cap_chan *chan)
6412 BT_DBG("chan %p", chan);
6414 chan->rx_state = L2CAP_RX_STATE_RECV;
6416 if (chan->hs_hcon)
6417 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6418 else
6419 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6421 return l2cap_resegment(chan);
/* RX state WAIT_P (channel move in progress, waiting for a P=1 frame
 * from the peer): anything without the poll bit is a protocol error.
 * On receipt, rewind the TX window to the peer's reqseq, finish the
 * move, answer with F=1, then re-dispatch non-I-frame events to the
 * normal RECV state handler.  Returns 0 or a negative errno.
 */
6424 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6425 struct l2cap_ctrl *control,
6426 struct sk_buff *skb, u8 event)
6428 int err;
6430 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6431 event);
6433 if (!control->poll)
6434 return -EPROTO;
6436 l2cap_process_reqseq(chan, control->reqseq);
6438 if (!skb_queue_empty(&chan->tx_q))
6439 chan->tx_send_head = skb_peek(&chan->tx_q);
6440 else
6441 chan->tx_send_head = NULL;
6443 /* Rewind next_tx_seq to the point expected
6444 * by the receiver.
 */
6446 chan->next_tx_seq = control->reqseq;
6447 chan->unacked_frames = 0;
6449 err = l2cap_finish_move(chan);
6450 if (err)
6451 return err;
6453 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6454 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames are not valid while waiting for the poll */
6456 if (event == L2CAP_EV_RECV_IFRAME)
6457 return -EPROTO;
6459 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state WAIT_F (channel move: we polled, waiting for the F=1
 * response): non-final frames are a protocol error.  On receipt,
 * rewind the TX window to reqseq, adopt the new controller's MTU,
 * re-segment pending data and process the frame via the RECV state
 * handler.  Returns 0 or a negative errno.
 */
6462 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6463 struct l2cap_ctrl *control,
6464 struct sk_buff *skb, u8 event)
6466 int err;
6468 if (!control->final)
6469 return -EPROTO;
6471 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6473 chan->rx_state = L2CAP_RX_STATE_RECV;
6474 l2cap_process_reqseq(chan, control->reqseq);
6476 if (!skb_queue_empty(&chan->tx_q))
6477 chan->tx_send_head = skb_peek(&chan->tx_q);
6478 else
6479 chan->tx_send_head = NULL;
6481 /* Rewind next_tx_seq to the point expected
6482 * by the receiver.
 */
6484 chan->next_tx_seq = control->reqseq;
6485 chan->unacked_frames = 0;
/* hs_hcon set => channel now rides the AMP controller */
6487 if (chan->hs_hcon)
6488 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6489 else
6490 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6492 err = l2cap_resegment(chan);
6494 if (!err)
6495 err = l2cap_rx_state_recv(chan, control, skb, event);
6497 return err;
/* Sanity-check a received reqseq: it must acknowledge a frame that we
 * have actually sent but not yet seen acked, i.e. its (mod-window)
 * offset from next_tx_seq must not exceed the unacked span.
 */
6500 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6502 /* Make sure reqseq is for a packet that has been sent but not acked */
6503 u16 unacked;
6505 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6506 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validate the frame's reqseq, then
 * route the event to the handler for the channel's current RX state.
 * An out-of-window reqseq is treated as a fatal protocol error and the
 * channel is disconnected.  Returns the state handler's result (0 on
 * an invalid reqseq, since the disconnect is handled here).
 */
6509 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6510 struct sk_buff *skb, u8 event)
6512 int err = 0;
6514 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6515 control, skb, event, chan->rx_state);
6517 if (__valid_reqseq(chan, control->reqseq)) {
6518 switch (chan->rx_state) {
6519 case L2CAP_RX_STATE_RECV:
6520 err = l2cap_rx_state_recv(chan, control, skb, event);
6521 break;
6522 case L2CAP_RX_STATE_SREJ_SENT:
6523 err = l2cap_rx_state_srej_sent(chan, control, skb,
6524 event);
6525 break;
6526 case L2CAP_RX_STATE_WAIT_P:
6527 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6528 break;
6529 case L2CAP_RX_STATE_WAIT_F:
6530 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6531 break;
6532 default:
6533 /* shut it down */
6534 break;
6536 } else {
6537 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6538 control->reqseq, chan->next_tx_seq,
6539 chan->expected_ack_seq);
6540 l2cap_send_disconn_req(chan, ECONNRESET);
6543 return err;
/* Streaming-mode receive: only exactly-expected txseqs are reassembled;
 * any sequence gap discards the partially assembled SDU and the frame
 * itself (streaming mode never retransmits).  last_acked_seq and
 * expected_tx_seq always advance to follow the received frame.
 * Always returns 0.
 */
6546 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6547 struct sk_buff *skb)
6549 int err = 0;
6551 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6552 chan->rx_state);
6554 if (l2cap_classify_txseq(chan, control->txseq) ==
6555 L2CAP_TXSEQ_EXPECTED) {
6556 l2cap_pass_to_tx(chan, control);
6558 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6559 __next_seq(chan, chan->buffer_seq));
6561 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6563 l2cap_reassemble_sdu(chan, skb, control);
6564 } else {
/* Out of sequence: drop any partial SDU and this frame */
6565 if (chan->sdu) {
6566 kfree_skb(chan->sdu);
6567 chan->sdu = NULL;
6569 chan->sdu_last_frag = NULL;
6570 chan->sdu_len = 0;
6572 if (skb) {
6573 BT_DBG("Freeing %p", skb);
6574 kfree_skb(skb);
6578 chan->last_acked_seq = control->txseq;
6579 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6581 return err;
/* Entry point for ERTM/streaming data frames: unpack and validate the
 * control field, FCS and payload length, then route I-frames to
 * l2cap_rx()/l2cap_stream_rx() and S-frames to l2cap_rx() with the
 * event derived from the supervisory function.  Invalid frames are
 * either silently dropped or cause a disconnect, per the spec.
 * Always returns 0; the skb is consumed on every path.
 */
6584 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6586 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6587 u16 len;
6588 u8 event;
6590 __unpack_control(chan, skb);
6592 len = skb->len;
/*
6595 * We can just drop the corrupted I-frame here.
6596 * Receiver will miss it and start proper recovery
6597 * procedures and ask for retransmission.
 */
6599 if (l2cap_check_fcs(chan, skb))
6600 goto drop;
/* Compute the payload length excluding SDU-length and FCS fields */
6602 if (!control->sframe && control->sar == L2CAP_SAR_START)
6603 len -= L2CAP_SDULEN_SIZE;
6605 if (chan->fcs == L2CAP_FCS_CRC16)
6606 len -= L2CAP_FCS_SIZE;
6608 if (len > chan->mps) {
6609 l2cap_send_disconn_req(chan, ECONNRESET);
6610 goto drop;
6613 if (!control->sframe) {
6614 int err;
6616 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6617 control->sar, control->reqseq, control->final,
6618 control->txseq);
6620 /* Validate F-bit - F=0 always valid, F=1 only
6621 * valid in TX WAIT_F
 */
6623 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6624 goto drop;
6626 if (chan->mode != L2CAP_MODE_STREAMING) {
6627 event = L2CAP_EV_RECV_IFRAME;
6628 err = l2cap_rx(chan, control, skb, event);
6629 } else {
6630 err = l2cap_stream_rx(chan, control, skb);
6633 if (err)
6634 l2cap_send_disconn_req(chan, ECONNRESET);
6635 } else {
/* Map the 2-bit supervisory field to a receive event */
6636 const u8 rx_func_to_event[4] = {
6637 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6638 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6641 /* Only I-frames are expected in streaming mode */
6642 if (chan->mode == L2CAP_MODE_STREAMING)
6643 goto drop;
6645 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6646 control->reqseq, control->final, control->poll,
6647 control->super);
6649 if (len != 0) {
6650 BT_ERR("Trailing bytes: %d in sframe", len);
6651 l2cap_send_disconn_req(chan, ECONNRESET);
6652 goto drop;
6655 /* Validate F and P bits */
6656 if (control->final && (control->poll ||
6657 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6658 goto drop;
6660 event = rx_func_to_event[control->super];
6661 if (l2cap_rx(chan, control, skb, event))
6662 l2cap_send_disconn_req(chan, ECONNRESET);
6665 return 0;
6667 drop:
6668 kfree_skb(skb);
6669 return 0;
/* Top up the peer's LE flow-control credits.  To avoid chattiness we
 * only send an LE Flow Control Credit packet once our advertised
 * credit count has dropped below half of le_max_credits, and then
 * restore it to the full le_max_credits.
 */
6672 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6674 struct l2cap_conn *conn = chan->conn;
6675 struct l2cap_le_credits pkt;
6676 u16 return_credits;
6678 /* We return more credits to the sender only after the amount of
6679 * credits falls below half of the initial amount.
 */
6681 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6682 return;
6684 return_credits = le_max_credits - chan->rx_credits;
6686 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6688 chan->rx_credits += return_credits;
6690 pkt.cid = cpu_to_le16(chan->scid);
6691 pkt.credits = cpu_to_le16(return_credits);
6693 chan->ident = l2cap_get_ident(conn);
6695 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6698 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6700 int err;
6702 if (!chan->rx_credits) {
6703 BT_ERR("No credits to receive LE L2CAP data");
6704 l2cap_send_disconn_req(chan, ECONNRESET);
6705 return -ENOBUFS;
6708 if (chan->imtu < skb->len) {
6709 BT_ERR("Too big LE L2CAP PDU");
6710 return -ENOBUFS;
6713 chan->rx_credits--;
6714 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6716 l2cap_chan_le_send_credits(chan);
6718 err = 0;
6720 if (!chan->sdu) {
6721 u16 sdu_len;
6723 sdu_len = get_unaligned_le16(skb->data);
6724 skb_pull(skb, L2CAP_SDULEN_SIZE);
6726 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6727 sdu_len, skb->len, chan->imtu);
6729 if (sdu_len > chan->imtu) {
6730 BT_ERR("Too big LE L2CAP SDU length received");
6731 err = -EMSGSIZE;
6732 goto failed;
6735 if (skb->len > sdu_len) {
6736 BT_ERR("Too much LE L2CAP data received");
6737 err = -EINVAL;
6738 goto failed;
6741 if (skb->len == sdu_len)
6742 return chan->ops->recv(chan, skb);
6744 chan->sdu = skb;
6745 chan->sdu_len = sdu_len;
6746 chan->sdu_last_frag = skb;
6748 return 0;
6751 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6752 chan->sdu->len, skb->len, chan->sdu_len);
6754 if (chan->sdu->len + skb->len > chan->sdu_len) {
6755 BT_ERR("Too much LE L2CAP data received");
6756 err = -EINVAL;
6757 goto failed;
6760 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6761 skb = NULL;
6763 if (chan->sdu->len == chan->sdu_len) {
6764 err = chan->ops->recv(chan, chan->sdu);
6765 if (!err) {
6766 chan->sdu = NULL;
6767 chan->sdu_last_frag = NULL;
6768 chan->sdu_len = 0;
6772 failed:
6773 if (err) {
6774 kfree_skb(skb);
6775 kfree_skb(chan->sdu);
6776 chan->sdu = NULL;
6777 chan->sdu_last_frag = NULL;
6778 chan->sdu_len = 0;
6781 /* We can't return an error here since we took care of the skb
6782 * freeing internally. An error return would cause the caller to
6783 * do a double-free of the skb.
6785 return 0;
/* Dispatch a data frame to the channel identified by @cid, locking the
 * channel for the duration.  Unknown CIDs are dropped, except that
 * A2MP traffic may lazily create its channel here.  The skb is
 * consumed on every path (either by the mode handler or freed).
 */
6788 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6789 struct sk_buff *skb)
6791 struct l2cap_chan *chan;
6793 chan = l2cap_get_chan_by_scid(conn, cid);
6794 if (!chan) {
6795 if (cid == L2CAP_CID_A2MP) {
6796 chan = a2mp_channel_create(conn, skb);
6797 if (!chan) {
6798 kfree_skb(skb);
6799 return;
6802 l2cap_chan_lock(chan);
6803 } else {
6804 BT_DBG("unknown cid 0x%4.4x", cid);
6805 /* Drop packet and return */
6806 kfree_skb(skb);
6807 return;
6811 BT_DBG("chan %p, len %d", chan, skb->len);
6813 /* If we receive data on a fixed channel before the info req/rsp
6814 * procdure is done simply assume that the channel is supported
6815 * and mark it as ready.
 */
6817 if (chan->chan_type == L2CAP_CHAN_FIXED)
6818 l2cap_chan_ready(chan);
6820 if (chan->state != BT_CONNECTED)
6821 goto drop;
6823 switch (chan->mode) {
6824 case L2CAP_MODE_LE_FLOWCTL:
6825 if (l2cap_le_data_rcv(chan, skb) < 0)
6826 goto drop;
6828 goto done;
6830 case L2CAP_MODE_BASIC:
6831 /* If socket recv buffers overflows we drop data here
6832 * which is *bad* because L2CAP has to be reliable.
6833 * But we don't have any other choice. L2CAP doesn't
6834 * provide flow control mechanism. */
6836 if (chan->imtu < skb->len) {
6837 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6838 goto drop;
/* ->recv() returning 0 means it consumed the skb */
6841 if (!chan->ops->recv(chan, skb))
6842 goto done;
6843 break;
6845 case L2CAP_MODE_ERTM:
6846 case L2CAP_MODE_STREAMING:
/* l2cap_data_rcv always consumes the skb */
6847 l2cap_data_rcv(chan, skb);
6848 goto done;
6850 default:
6851 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6852 break;
6855 drop:
6856 kfree_skb(skb);
6858 done:
6859 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a listening or bound
 * PSM channel on a BR/EDR link.  The remote address and PSM are stored
 * in the skb's control block so recvmsg() can report msg_name.  The
 * channel reference taken by the global lookup is dropped on all paths;
 * the skb is freed unless ->recv() consumed it.
 */
6862 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6863 struct sk_buff *skb)
6865 struct hci_conn *hcon = conn->hcon;
6866 struct l2cap_chan *chan;
6868 if (hcon->type != ACL_LINK)
6869 goto free_skb;
6871 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6872 ACL_LINK);
6873 if (!chan)
6874 goto free_skb;
6876 BT_DBG("chan %p, len %d", chan, skb->len);
6878 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6879 goto drop;
6881 if (chan->imtu < skb->len)
6882 goto drop;
6884 /* Store remote BD_ADDR and PSM for msg_name */
6885 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6886 bt_cb(skb)->l2cap.psm = psm;
6888 if (!chan->ops->recv(chan, skb)) {
6889 l2cap_chan_put(chan);
6890 return;
6893 drop:
6894 l2cap_chan_put(chan);
6895 free_skb:
6896 kfree_skb(skb);
/* Process one complete L2CAP frame: parse the basic header, verify the
 * length, filter blacklisted LE peers, and dispatch by CID to the
 * signaling, connectionless or data-channel handlers.  Frames arriving
 * before the HCI connection is fully up are parked on conn->pending_rx
 * and replayed by process_pending_rx().  Takes ownership of @skb.
 */
6899 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6901 struct l2cap_hdr *lh = (void *) skb->data;
6902 struct hci_conn *hcon = conn->hcon;
6903 u16 cid, len;
6904 __le16 psm;
6906 if (hcon->state != BT_CONNECTED) {
6907 BT_DBG("queueing pending rx skb");
6908 skb_queue_tail(&conn->pending_rx, skb);
6909 return;
6912 skb_pull(skb, L2CAP_HDR_SIZE);
6913 cid = __le16_to_cpu(lh->cid);
6914 len = __le16_to_cpu(lh->len);
/* Header length must exactly match the remaining payload */
6916 if (len != skb->len) {
6917 kfree_skb(skb);
6918 return;
6921 /* Since we can't actively block incoming LE connections we must
6922 * at least ensure that we ignore incoming data from them.
 */
6924 if (hcon->type == LE_LINK &&
6925 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6926 bdaddr_dst_type(hcon))) {
6927 kfree_skb(skb);
6928 return;
6931 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6933 switch (cid) {
6934 case L2CAP_CID_SIGNALING:
6935 l2cap_sig_channel(conn, skb);
6936 break;
6938 case L2CAP_CID_CONN_LESS:
6939 psm = get_unaligned((__le16 *) skb->data);
6940 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6941 l2cap_conless_channel(conn, psm, skb);
6942 break;
6944 case L2CAP_CID_LE_SIGNALING:
6945 l2cap_le_sig_channel(conn, skb);
6946 break;
6948 default:
6949 l2cap_data_channel(conn, cid, skb);
6950 break;
6954 static void process_pending_rx(struct work_struct *work)
6956 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6957 pending_rx_work);
6958 struct sk_buff *skb;
6960 BT_DBG("");
6962 while ((skb = skb_dequeue(&conn->pending_rx)))
6963 l2cap_recv_frame(conn, skb);
/* Get or create the l2cap_conn attached to an HCI connection.
 * Allocates the conn, binds it to a new hci_chan, picks the MTU from
 * the controller (LE MTU for LE links when set, otherwise ACL MTU),
 * advertises the locally supported fixed channels and initializes all
 * locks, lists and work items.  Returns the conn, or NULL on allocation
 * failure.  Idempotent: an existing conn is returned as-is.
 */
6966 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6968 struct l2cap_conn *conn = hcon->l2cap_data;
6969 struct hci_chan *hchan;
6971 if (conn)
6972 return conn;
6974 hchan = hci_chan_create(hcon);
6975 if (!hchan)
6976 return NULL;
6978 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6979 if (!conn) {
6980 hci_chan_del(hchan);
6981 return NULL;
6984 kref_init(&conn->ref);
6985 hcon->l2cap_data = conn;
/* conn holds a reference on the hci_conn for its lifetime */
6986 conn->hcon = hci_conn_get(hcon);
6987 conn->hchan = hchan;
6989 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6991 switch (hcon->type) {
6992 case LE_LINK:
6993 if (hcon->hdev->le_mtu) {
6994 conn->mtu = hcon->hdev->le_mtu;
6995 break;
6997 /* fall through */
6998 default:
6999 conn->mtu = hcon->hdev->acl_mtu;
7000 break;
7003 conn->feat_mask = 0;
7005 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7007 if (hcon->type == ACL_LINK &&
7008 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7009 conn->local_fixed_chan |= L2CAP_FC_A2MP;
/* BR/EDR SMP channel is only offered with LE + secure connections */
7011 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7012 (bredr_sc_enabled(hcon->hdev) ||
7013 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7014 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7016 mutex_init(&conn->ident_lock);
7017 mutex_init(&conn->chan_lock);
7019 INIT_LIST_HEAD(&conn->chan_l);
7020 INIT_LIST_HEAD(&conn->users);
7022 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7024 skb_queue_head_init(&conn->pending_rx);
7025 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7026 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7028 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7030 return conn;
7033 static bool is_valid_psm(u16 psm, u8 dst_type) {
7034 if (!psm)
7035 return false;
7037 if (bdaddr_type_is_le(dst_type))
7038 return (psm <= 0x00ff);
7040 /* PSM must be odd and lsb of upper byte must be 0 */
7041 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on @chan toward @dst/@dst_type,
 * using PSM @psm (connection-oriented / LE CoC) or fixed CID @cid.
 *
 * Validates the PSM/CID combination and channel mode, creates or reuses
 * the underlying HCI link (LE scan-connect or ACL as appropriate),
 * attaches the channel to the link's l2cap_conn, and either starts the
 * L2CAP connect procedure or marks the channel connected immediately
 * for fixed channels on an already-established link.
 *
 * Returns 0 on success or if a connection attempt is already in
 * progress, -EISCONN if already connected, -EBUSY if the requested
 * DCID is taken, or another negative errno.  Caller must not hold
 * hdev/conn/chan locks; they are taken and released here in
 * hdev -> conn->chan_lock -> chan order.
 */
7044 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7045 bdaddr_t *dst, u8 dst_type)
7047 struct l2cap_conn *conn;
7048 struct hci_conn *hcon;
7049 struct hci_dev *hdev;
7050 int err;
7052 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7053 dst_type, __le16_to_cpu(psm));
7055 hdev = hci_get_route(dst, &chan->src);
7056 if (!hdev)
7057 return -EHOSTUNREACH;
7059 hci_dev_lock(hdev);
/* Raw channels may connect without a PSM or CID; others need one */
7061 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7062 chan->chan_type != L2CAP_CHAN_RAW) {
7063 err = -EINVAL;
7064 goto done;
7067 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7068 err = -EINVAL;
7069 goto done;
7072 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7073 err = -EINVAL;
7074 goto done;
7077 switch (chan->mode) {
7078 case L2CAP_MODE_BASIC:
7079 break;
7080 case L2CAP_MODE_LE_FLOWCTL:
7081 l2cap_le_flowctl_init(chan);
7082 break;
7083 case L2CAP_MODE_ERTM:
7084 case L2CAP_MODE_STREAMING:
7085 if (!disable_ertm)
7086 break;
7087 /* fall through */
7088 default:
7089 err = -EOPNOTSUPP;
7090 goto done;
7093 switch (chan->state) {
7094 case BT_CONNECT:
7095 case BT_CONNECT2:
7096 case BT_CONFIG:
7097 /* Already connecting */
7098 err = 0;
7099 goto done;
7101 case BT_CONNECTED:
7102 /* Already connected */
7103 err = -EISCONN;
7104 goto done;
7106 case BT_OPEN:
7107 case BT_BOUND:
7108 /* Can connect */
7109 break;
7111 default:
7112 err = -EBADFD;
7113 goto done;
7116 /* Set destination address and psm */
7117 bacpy(&chan->dst, dst);
7118 chan->dst_type = dst_type;
7120 chan->psm = psm;
7121 chan->dcid = cid;
7123 if (bdaddr_type_is_le(dst_type)) {
7124 u8 role;
7126 /* Convert from L2CAP channel address type to HCI address type
 */
7128 if (dst_type == BDADDR_LE_PUBLIC)
7129 dst_type = ADDR_LE_DEV_PUBLIC;
7130 else
7131 dst_type = ADDR_LE_DEV_RANDOM;
7133 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7134 role = HCI_ROLE_SLAVE;
7135 else
7136 role = HCI_ROLE_MASTER;
7138 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7139 chan->sec_level,
7140 HCI_LE_CONN_TIMEOUT,
7141 role);
7142 } else {
7143 u8 auth_type = l2cap_get_auth_type(chan);
7144 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7147 if (IS_ERR(hcon)) {
7148 err = PTR_ERR(hcon);
7149 goto done;
7152 conn = l2cap_conn_add(hcon);
7153 if (!conn) {
7154 hci_conn_drop(hcon);
7155 err = -ENOMEM;
7156 goto done;
7159 mutex_lock(&conn->chan_lock);
7160 l2cap_chan_lock(chan);
/* Refuse fixed-CID connects when that DCID is already in use */
7162 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7163 hci_conn_drop(hcon);
7164 err = -EBUSY;
7165 goto chan_unlock;
7168 /* Update source addr of the socket */
7169 bacpy(&chan->src, &hcon->src);
7170 chan->src_type = bdaddr_src_type(hcon);
7172 __l2cap_chan_add(conn, chan);
7174 /* l2cap_chan_add takes its own ref so we can drop this one */
7175 hci_conn_drop(hcon);
7177 l2cap_state_change(chan, BT_CONNECT);
7178 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7180 /* Release chan->sport so that it can be reused by other
7181 * sockets (as it's only used for listening sockets).
 */
7183 write_lock(&chan_list_lock);
7184 chan->sport = 0;
7185 write_unlock(&chan_list_lock);
7187 if (hcon->state == BT_CONNECTED) {
7188 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7189 __clear_chan_timer(chan);
7190 if (l2cap_chan_check_security(chan, true))
7191 l2cap_state_change(chan, BT_CONNECTED);
7192 } else
7193 l2cap_do_start(chan);
7196 err = 0;
7198 chan_unlock:
7199 l2cap_chan_unlock(chan);
7200 mutex_unlock(&conn->chan_lock);
7201 done:
7202 hci_dev_unlock(hdev);
7203 hci_dev_put(hdev);
7204 return err;
7206 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7208 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection
 * from @bdaddr.  Scans listening channels; exact local-address matches
 * take precedence over BDADDR_ANY wildcard listeners.  Returns the
 * accumulated HCI_LM_* link-mode flags (0 rejects the connection).
 */
7210 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7212 int exact = 0, lm1 = 0, lm2 = 0;
7213 struct l2cap_chan *c;
7215 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7217 /* Find listening sockets and check their link_mode */
7218 read_lock(&chan_list_lock);
7219 list_for_each_entry(c, &chan_list, global_l) {
7220 if (c->state != BT_LISTEN)
7221 continue;
7223 if (!bacmp(&c->src, &hdev->bdaddr)) {
7224 lm1 |= HCI_LM_ACCEPT;
7225 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7226 lm1 |= HCI_LM_MASTER;
7227 exact++;
7228 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7229 lm2 |= HCI_LM_ACCEPT;
7230 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7231 lm2 |= HCI_LM_MASTER;
7234 read_unlock(&chan_list_lock);
7236 return exact ? lm1 : lm2;
7239 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7240 * from an existing channel in the list or from the beginning of the
7241 * global list (by passing NULL as first parameter).
 *
 * The returned channel is held (l2cap_chan_hold) so the caller can
 * lock it outside chan_list_lock; the caller must l2cap_chan_put() it.
 * Returns NULL when no further matching channel exists.
 */
7243 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7244 struct hci_conn *hcon)
7246 u8 src_type = bdaddr_src_type(hcon);
7248 read_lock(&chan_list_lock);
7250 if (c)
7251 c = list_next_entry(c, global_l)
7252 else
7253 c = list_entry(chan_list.next, typeof(*c), global_l);
7255 list_for_each_entry_from(c, &chan_list, global_l) {
7256 if (c->chan_type != L2CAP_CHAN_FIXED)
7257 continue;
7258 if (c->state != BT_LISTEN)
7259 continue;
/* Accept channels bound to this adapter or to any address */
7260 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7261 continue;
7262 if (src_type != c->src_type)
7263 continue;
7265 l2cap_chan_hold(c);
7266 read_unlock(&chan_list_lock);
7267 return c;
7270 read_unlock(&chan_list_lock);
7272 return NULL;
/* HCI callback: an ACL or LE link finished connecting.  On failure the
 * l2cap_conn is torn down; on success the conn is (lazily) created,
 * blocked peers are ignored, listening fixed channels are offered the
 * new connection one at a time, and conn setup proceeds.
 */
7275 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7277 struct hci_dev *hdev = hcon->hdev;
7278 struct l2cap_conn *conn;
7279 struct l2cap_chan *pchan;
7280 u8 dst_type;
7282 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7283 return;
7285 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7287 if (status) {
7288 l2cap_conn_del(hcon, bt_to_errno(status));
7289 return;
7292 conn = l2cap_conn_add(hcon);
7293 if (!conn)
7294 return;
7296 dst_type = bdaddr_dst_type(hcon);
7298 /* If device is blocked, do not create channels for it */
7299 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7300 return;
7302 /* Find fixed channels and notify them of the new connection. We
7303 * use multiple individual lookups, continuing each time where
7304 * we left off, because the list lock would prevent calling the
7305 * potentially sleeping l2cap_chan_lock() function.
 */
7307 pchan = l2cap_global_fixed_chan(NULL, hcon);
7308 while (pchan) {
7309 struct l2cap_chan *chan, *next;
7311 /* Client fixed channels should override server ones */
7312 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7313 goto next;
7315 l2cap_chan_lock(pchan);
7316 chan = pchan->ops->new_connection(pchan);
7317 if (chan) {
7318 bacpy(&chan->src, &hcon->src);
7319 bacpy(&chan->dst, &hcon->dst);
7320 chan->src_type = bdaddr_src_type(hcon);
7321 chan->dst_type = dst_type;
7323 __l2cap_chan_add(conn, chan);
7326 l2cap_chan_unlock(pchan);
7327 next:
/* Advance before dropping the hold taken by the lookup */
7328 next = l2cap_global_fixed_chan(pchan, hcon);
7329 l2cap_chan_put(pchan);
7330 pchan = next;
7333 l2cap_conn_ready(conn);
/* HCI callback: report the reason code to use when the controller asks
 * why we are disconnecting.  Falls back to "remote user terminated"
 * when no l2cap_conn exists for the link.
 */
7336 int l2cap_disconn_ind(struct hci_conn *hcon)
7338 struct l2cap_conn *conn = hcon->l2cap_data;
7340 BT_DBG("hcon %p", hcon);
7342 if (!conn)
7343 return HCI_ERROR_REMOTE_USER_TERM;
7344 return conn->disc_reason;
/* HCI callback: an ACL or LE link went down - tear down the attached
 * l2cap_conn and all of its channels with the mapped errno.
 */
7347 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7349 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7350 return;
7352 BT_DBG("hcon %p reason %d", hcon, reason);
7354 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a link encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer at MEDIUM security and closes
 * the channel outright at HIGH/FIPS; regaining it at MEDIUM cancels
 * the pending timer.
 */
7357 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7359 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7360 return;
7362 if (encrypt == 0x00) {
7363 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7364 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7365 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7366 chan->sec_level == BT_SECURITY_FIPS)
7367 l2cap_chan_close(chan, ECONNREFUSED);
7368 } else {
7369 if (chan->sec_level == BT_SECURITY_MEDIUM)
7370 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed (or failed) on the
 * link.  Walks every channel on the conn and advances each one's state
 * machine: resumes connected/configuring channels, starts or aborts
 * pending connects, and answers deferred incoming connect requests
 * with success, pending-authorization or security-block results.
 */
7374 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7376 struct l2cap_conn *conn = hcon->l2cap_data;
7377 struct l2cap_chan *chan;
7379 if (!conn)
7380 return;
7382 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7384 mutex_lock(&conn->chan_lock);
7386 list_for_each_entry(chan, &conn->chan_l, list) {
7387 l2cap_chan_lock(chan);
7389 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7390 state_to_string(chan->state));
/* A2MP has its own security handling */
7392 if (chan->scid == L2CAP_CID_A2MP) {
7393 l2cap_chan_unlock(chan);
7394 continue;
7397 if (!status && encrypt)
7398 chan->sec_level = hcon->sec_level;
7400 if (!__l2cap_no_conn_pending(chan)) {
7401 l2cap_chan_unlock(chan);
7402 continue;
7405 if (!status && (chan->state == BT_CONNECTED ||
7406 chan->state == BT_CONFIG)) {
7407 chan->ops->resume(chan);
7408 l2cap_check_encryption(chan, encrypt);
7409 l2cap_chan_unlock(chan);
7410 continue;
7413 if (chan->state == BT_CONNECT) {
7414 if (!status)
7415 l2cap_start_connection(chan);
7416 else
7417 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7418 } else if (chan->state == BT_CONNECT2 &&
7419 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7420 struct l2cap_conn_rsp rsp;
7421 __u16 res, stat;
7423 if (!status) {
7424 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7425 res = L2CAP_CR_PEND;
7426 stat = L2CAP_CS_AUTHOR_PEND;
7427 chan->ops->defer(chan);
7428 } else {
7429 l2cap_state_change(chan, BT_CONFIG);
7430 res = L2CAP_CR_SUCCESS;
7431 stat = L2CAP_CS_NO_INFO;
7433 } else {
7434 l2cap_state_change(chan, BT_DISCONN);
7435 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7436 res = L2CAP_CR_SEC_BLOCK;
7437 stat = L2CAP_CS_NO_INFO;
7440 rsp.scid = cpu_to_le16(chan->dcid);
7441 rsp.dcid = cpu_to_le16(chan->scid);
7442 rsp.result = cpu_to_le16(res);
7443 rsp.status = cpu_to_le16(stat);
7444 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7445 sizeof(rsp), &rsp);
/* Kick off configuration right away on success */
7447 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7448 res == L2CAP_CR_SUCCESS) {
7449 char buf[128];
7450 set_bit(CONF_REQ_SENT, &chan->conf_state);
7451 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7452 L2CAP_CONF_REQ,
7453 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7454 buf);
7455 chan->num_conf_req++;
7459 l2cap_chan_unlock(chan);
7462 mutex_unlock(&conn->chan_lock);
/* Entry point from the HCI core for incoming ACL data.  Reassembles
 * L2CAP frames from ACL fragments: a start fragment must carry a full
 * basic header whose length field sizes the reassembly buffer
 * (conn->rx_skb / conn->rx_len); continuation fragments are appended
 * until the frame is complete, then handed to l2cap_recv_frame().
 * Any framing violation flags the conn unreliable and drops the data.
 * Takes ownership of @skb on every path.
 */
7465 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7467 struct l2cap_conn *conn = hcon->l2cap_data;
7468 struct l2cap_hdr *hdr;
7469 int len;
7471 /* For AMP controller do not create l2cap conn */
7472 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7473 goto drop;
7475 if (!conn)
7476 conn = l2cap_conn_add(hcon);
7478 if (!conn)
7479 goto drop;
7481 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7483 switch (flags) {
7484 case ACL_START:
7485 case ACL_START_NO_FLUSH:
7486 case ACL_COMPLETE:
/* A start while reassembly is pending means we lost fragments */
7487 if (conn->rx_len) {
7488 BT_ERR("Unexpected start frame (len %d)", skb->len);
7489 kfree_skb(conn->rx_skb);
7490 conn->rx_skb = NULL;
7491 conn->rx_len = 0;
7492 l2cap_conn_unreliable(conn, ECOMM);
7495 /* Start fragment always begin with Basic L2CAP header */
7496 if (skb->len < L2CAP_HDR_SIZE) {
7497 BT_ERR("Frame is too short (len %d)", skb->len);
7498 l2cap_conn_unreliable(conn, ECOMM);
7499 goto drop;
7502 hdr = (struct l2cap_hdr *) skb->data;
7503 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7505 if (len == skb->len) {
7506 /* Complete frame received */
7507 l2cap_recv_frame(conn, skb);
7508 return;
7511 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7513 if (skb->len > len) {
7514 BT_ERR("Frame is too long (len %d, expected len %d)",
7515 skb->len, len);
7516 l2cap_conn_unreliable(conn, ECOMM);
7517 goto drop;
7520 /* Allocate skb for the complete frame (with header) */
7521 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7522 if (!conn->rx_skb)
7523 goto drop;
7525 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7526 skb->len);
7527 conn->rx_len = len - skb->len;
7528 break;
7530 case ACL_CONT:
7531 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7533 if (!conn->rx_len) {
7534 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7535 l2cap_conn_unreliable(conn, ECOMM);
7536 goto drop;
7539 if (skb->len > conn->rx_len) {
7540 BT_ERR("Fragment is too long (len %d, expected %d)",
7541 skb->len, conn->rx_len);
7542 kfree_skb(conn->rx_skb);
7543 conn->rx_skb = NULL;
7544 conn->rx_len = 0;
7545 l2cap_conn_unreliable(conn, ECOMM);
7546 goto drop;
7549 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7550 skb->len);
7551 conn->rx_len -= skb->len;
7553 if (!conn->rx_len) {
7554 /* Complete frame received. l2cap_recv_frame
7555 * takes ownership of the skb so set the global
7556 * rx_skb pointer to NULL first.
 */
7558 struct sk_buff *rx_skb = conn->rx_skb;
7559 conn->rx_skb = NULL;
7560 l2cap_recv_frame(conn, rx_skb);
7562 break;
7565 drop:
7566 kfree_skb(skb);
/* Callback table registered with the HCI core so L2CAP is notified of
 * link connect/disconnect and security events.
 */
7569 static struct hci_cb l2cap_cb = {
7570 .name = "L2CAP",
7571 .connect_cfm = l2cap_connect_cfm,
7572 .disconn_cfm = l2cap_disconn_cfm,
7573 .security_cfm = l2cap_security_cfm,
/* seq_file show handler for /sys/kernel/debug/bluetooth/l2cap: dump
 * one line of addressing/state/config info per global channel.
 */
7576 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7578 struct l2cap_chan *c;
7580 read_lock(&chan_list_lock);
7582 list_for_each_entry(c, &chan_list, global_l) {
7583 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7584 &c->src, c->src_type, &c->dst, c->dst_type,
7585 c->state, __le16_to_cpu(c->psm),
7586 c->scid, c->dcid, c->imtu, c->omtu,
7587 c->sec_level, c->mode);
7590 read_unlock(&chan_list_lock);
7592 return 0;
/* debugfs open: bind the seq_file single-shot show handler. */
7595 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7597 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (standard seq_file glue). */
7600 static const struct file_operations l2cap_debugfs_fops = {
7601 .open = l2cap_debugfs_open,
7602 .read = seq_read,
7603 .llseek = seq_lseek,
7604 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit() */
7607 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and HCI callbacks, and
 * (when bluetooth debugfs exists) expose the channel dump plus the
 * tunable LE credit/MPS knobs.  Returns 0 or a negative errno from
 * socket registration.
 */
7609 int __init l2cap_init(void)
7611 int err;
7613 err = l2cap_init_sockets();
7614 if (err < 0)
7615 return err;
7617 hci_register_cb(&l2cap_cb);
7619 if (IS_ERR_OR_NULL(bt_debugfs))
7620 return 0;
7622 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7623 NULL, &l2cap_debugfs_fops);
7625 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7626 &le_max_credits);
7627 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7628 &le_default_mps);
7630 return 0;
/* Module teardown: undo l2cap_init() in reverse order. */
7633 void l2cap_exit(void)
7635 debugfs_remove(l2cap_debugfs);
7636 hci_unregister_cb(&l2cap_cb);
7637 l2cap_cleanup_sockets();
/* Runtime knob: when set, outgoing ERTM/streaming channels are refused
 * (basic mode only).  Writable at 0644 via /sys/module parameters.
 */
7640 module_param(disable_ertm, bool, 0644);
7641 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");