ARM: dma-api: fix max_pfn off-by-one error in __dma_supported()
[linux/fpc-iii.git] / net / bluetooth / l2cap_core.c
blob195459a1e53ef40a07ae40b80f6fae41da780103
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
45 #define LE_FLOWCTL_MAX_CREDITS 65535
/* When set, ERTM mode is not used (toggled elsewhere; see module code). */
47 bool disable_ertm;
/* L2CAP feature bits advertised by this host (fixed channels + UCD). */
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
/* Global list of all L2CAP channels, guarded by chan_list_lock. */
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
55 u8 code, u8 ident, u16 dlen, void *data);
56 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
57 void *data);
58 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
59 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
62 struct sk_buff_head *skbs, u8 event);
64 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 if (link_type == LE_LINK) {
67 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 return BDADDR_LE_PUBLIC;
69 else
70 return BDADDR_LE_RANDOM;
73 return BDADDR_BREDR;
/* bdaddr type of hcon's source (local) address. */
76 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
78 return bdaddr_type(hcon->type, hcon->src_type);
/* bdaddr type of hcon's destination (remote) address. */
81 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
83 return bdaddr_type(hcon->type, hcon->dst_type);
86 /* ---- L2CAP channels ---- */
88 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
89 u16 cid)
91 struct l2cap_chan *c;
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->dcid == cid)
95 return c;
97 return NULL;
100 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
101 u16 cid)
103 struct l2cap_chan *c;
105 list_for_each_entry(c, &conn->chan_l, list) {
106 if (c->scid == cid)
107 return c;
109 return NULL;
112 /* Find channel with given SCID.
113 * Returns locked channel. */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
115 u16 cid)
117 struct l2cap_chan *c;
119 mutex_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 l2cap_chan_lock(c);
123 mutex_unlock(&conn->chan_lock);
125 return c;
128 /* Find channel with given DCID.
129 * Returns locked channel.
131 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
132 u16 cid)
134 struct l2cap_chan *c;
136 mutex_lock(&conn->chan_lock);
137 c = __l2cap_get_chan_by_dcid(conn, cid);
138 if (c)
139 l2cap_chan_lock(c);
140 mutex_unlock(&conn->chan_lock);
142 return c;
145 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 u8 ident)
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &conn->chan_l, list) {
151 if (c->ident == ident)
152 return c;
154 return NULL;
157 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
158 u8 ident)
160 struct l2cap_chan *c;
162 mutex_lock(&conn->chan_lock);
163 c = __l2cap_get_chan_by_ident(conn, ident);
164 if (c)
165 l2cap_chan_lock(c);
166 mutex_unlock(&conn->chan_lock);
168 return c;
171 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
172 u8 src_type)
174 struct l2cap_chan *c;
176 list_for_each_entry(c, &chan_list, global_l) {
177 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
178 continue;
180 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
181 continue;
183 if (c->sport == psm && !bacmp(&c->src, src))
184 return c;
186 return NULL;
189 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
191 int err;
193 write_lock(&chan_list_lock);
195 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
196 err = -EADDRINUSE;
197 goto done;
200 if (psm) {
201 chan->psm = psm;
202 chan->sport = psm;
203 err = 0;
204 } else {
205 u16 p, start, end, incr;
207 if (chan->src_type == BDADDR_BREDR) {
208 start = L2CAP_PSM_DYN_START;
209 end = L2CAP_PSM_AUTO_END;
210 incr = 2;
211 } else {
212 start = L2CAP_PSM_LE_DYN_START;
213 end = L2CAP_PSM_LE_DYN_END;
214 incr = 1;
217 err = -EINVAL;
218 for (p = start; p <= end; p += incr)
219 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
220 chan->src_type)) {
221 chan->psm = cpu_to_le16(p);
222 chan->sport = cpu_to_le16(p);
223 err = 0;
224 break;
228 done:
229 write_unlock(&chan_list_lock);
230 return err;
232 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Register @chan as a fixed channel on source CID @scid.  Always
 * succeeds; returns 0.
 */
234 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
236 write_lock(&chan_list_lock);
238 /* Override the defaults (which are for conn-oriented) */
239 chan->omtu = L2CAP_DEFAULT_MTU;
240 chan->chan_type = L2CAP_CHAN_FIXED;
242 chan->scid = scid;
244 write_unlock(&chan_list_lock);
246 return 0;
249 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
251 u16 cid, dyn_end;
253 if (conn->hcon->type == LE_LINK)
254 dyn_end = L2CAP_CID_LE_DYN_END;
255 else
256 dyn_end = L2CAP_CID_DYN_END;
258 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
259 if (!__l2cap_get_chan_by_scid(conn, cid))
260 return cid;
263 return 0;
/* Move @chan to @state and notify the channel ops (err = 0). */
266 static void l2cap_state_change(struct l2cap_chan *chan, int state)
268 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
269 state_to_string(state));
271 chan->state = state;
272 chan->ops->state_change(chan, state, 0);
/* Move @chan to @state and notify the channel ops with error @err. */
275 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
276 int state, int err)
278 chan->state = state;
279 chan->ops->state_change(chan, chan->state, err);
/* Report error @err to the channel ops without changing state. */
282 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
284 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending or no retransmission timeout is configured.
 */
287 static void __set_retrans_timer(struct l2cap_chan *chan)
289 if (!delayed_work_pending(&chan->monitor_timer) &&
290 chan->retrans_timeout) {
291 l2cap_set_timer(chan, &chan->retrans_timer,
292 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer (and cancel any pending retransmission
 * timer); no-op when no monitor timeout is configured.
 */
296 static void __set_monitor_timer(struct l2cap_chan *chan)
298 __clear_retrans_timer(chan);
299 if (chan->monitor_timeout) {
300 l2cap_set_timer(chan, &chan->monitor_timer,
301 msecs_to_jiffies(chan->monitor_timeout));
305 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
306 u16 seq)
308 struct sk_buff *skb;
310 skb_queue_walk(head, skb) {
311 if (bt_cb(skb)->l2cap.txseq == seq)
312 return skb;
315 return NULL;
318 /* ---- L2CAP sequence number lists ---- */
320 /* For ERTM, ordered lists of sequence numbers must be tracked for
321 * SREJ requests that are received and for frames that are to be
322 * retransmitted. These seq_list functions implement a singly-linked
323 * list in an array, where membership in the list can also be checked
324 * in constant time. Items can also be added to the tail of the list
325 * and removed from the head in constant time, without further memory
326 * allocs or frees.
*/
329 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
331 size_t alloc_size, i;
333 /* Allocated size is a power of 2 to map sequence numbers
334 * (which may be up to 14 bits) in to a smaller array that is
335 * sized for the negotiated ERTM transmit windows.
337 alloc_size = roundup_pow_of_two(size);
339 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
340 if (!seq_list->list)
341 return -ENOMEM;
343 seq_list->mask = alloc_size - 1;
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 for (i = 0; i < alloc_size; i++)
347 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
349 return 0;
/* Release the backing array allocated by l2cap_seq_list_init(). */
352 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
354 kfree(seq_list->list);
/* Return true if @seq is currently a member of @seq_list. */
357 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
358 u16 seq)
360 /* Constant-time check for list membership */
361 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
364 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
366 u16 seq = seq_list->head;
367 u16 mask = seq_list->mask;
369 seq_list->head = seq_list->list[seq & mask];
370 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
372 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
373 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
377 return seq;
380 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
382 u16 i;
384 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
385 return;
387 for (i = 0; i <= seq_list->mask; i++)
388 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
390 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
394 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
396 u16 mask = seq_list->mask;
398 /* All appends happen in constant time */
400 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
401 return;
403 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
404 seq_list->head = seq;
405 else
406 seq_list->list[seq_list->tail & mask] = seq;
408 seq_list->tail = seq;
409 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state.  Takes conn->chan_lock before
 * the channel lock, matching the locking order used elsewhere.
 */
412 static void l2cap_chan_timeout(struct work_struct *work)
414 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
415 chan_timer.work);
416 struct l2cap_conn *conn = chan->conn;
417 int reason;
419 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
421 mutex_lock(&conn->chan_lock);
422 l2cap_chan_lock(chan);
424 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
425 reason = ECONNREFUSED;
426 else if (chan->state == BT_CONNECT &&
427 chan->sec_level != BT_SECURITY_SDP)
428 reason = ECONNREFUSED;
429 else
430 reason = ETIMEDOUT;
432 l2cap_chan_close(chan, reason);
434 l2cap_chan_unlock(chan);
/* ops->close is called outside the channel lock but within chan_lock */
436 chan->ops->close(chan);
437 mutex_unlock(&conn->chan_lock);
/* Drop the reference taken when the timer was armed */
439 l2cap_chan_put(chan);
/* Allocate a new channel, link it on the global list and initialise
 * its lock, timer, refcount and state.  Returns NULL on allocation
 * failure.  Caller owns the initial kref.
 */
442 struct l2cap_chan *l2cap_chan_create(void)
444 struct l2cap_chan *chan;
446 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
447 if (!chan)
448 return NULL;
450 mutex_init(&chan->lock);
452 /* Set default lock nesting level */
453 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
455 write_lock(&chan_list_lock);
456 list_add(&chan->global_l, &chan_list);
457 write_unlock(&chan_list_lock);
459 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
461 chan->state = BT_OPEN;
463 kref_init(&chan->kref);
465 /* This flag is cleared in l2cap_chan_ready() */
466 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
468 BT_DBG("chan %p", chan);
470 return chan;
472 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked from l2cap_chan_put() when the last ref drops.
 */
474 static void l2cap_chan_destroy(struct kref *kref)
476 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
478 BT_DBG("chan %p", chan);
480 write_lock(&chan_list_lock);
481 list_del(&chan->global_l);
482 write_unlock(&chan_list_lock);
484 kfree(chan);
/* Take a reference on channel @c. */
487 void l2cap_chan_hold(struct l2cap_chan *c)
489 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
491 kref_get(&c->kref);
/* Drop a reference on channel @c; frees it via l2cap_chan_destroy()
 * when this was the last reference.
 */
494 void l2cap_chan_put(struct l2cap_chan *c)
496 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
498 kref_put(&c->kref, l2cap_chan_destroy);
500 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset @chan to the default ERTM/security/timeout parameters used for
 * connection-oriented channels.
 */
502 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
504 chan->fcs = L2CAP_FCS_CRC16;
505 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
506 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
507 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
508 chan->remote_max_tx = chan->max_tx;
509 chan->remote_tx_win = chan->tx_win;
510 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
511 chan->sec_level = BT_SECURITY_LOW;
512 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
513 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
514 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
515 chan->conf_state = 0;
517 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
519 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialise LE credit-based flow control state on @chan.  @tx_credits
 * is the peer-granted initial credit count; rx credits are sized so
 * the peer can send one full SDU of imtu bytes.
 */
521 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
523 chan->sdu = NULL;
524 chan->sdu_last_frag = NULL;
525 chan->sdu_len = 0;
526 chan->tx_credits = tx_credits;
527 /* Derive MPS from connection MTU to stop HCI fragmentation */
528 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
529 /* Give enough credits for a full packet */
530 chan->rx_credits = (chan->imtu / chan->mps) + 1;
532 skb_queue_head_init(&chan->tx_q);
/* Attach @chan to @conn: assign CIDs/MTU per channel type, set EFS
 * defaults, take a channel ref and link it on the connection's list.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
535 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
537 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
538 __le16_to_cpu(chan->psm), chan->dcid);
540 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
542 chan->conn = conn;
544 switch (chan->chan_type) {
545 case L2CAP_CHAN_CONN_ORIENTED:
546 /* Alloc CID for connection-oriented socket */
547 chan->scid = l2cap_alloc_cid(conn);
548 if (conn->hcon->type == ACL_LINK)
549 chan->omtu = L2CAP_DEFAULT_MTU;
550 break;
552 case L2CAP_CHAN_CONN_LESS:
553 /* Connectionless socket */
554 chan->scid = L2CAP_CID_CONN_LESS;
555 chan->dcid = L2CAP_CID_CONN_LESS;
556 chan->omtu = L2CAP_DEFAULT_MTU;
557 break;
559 case L2CAP_CHAN_FIXED:
560 /* Caller will set CID and CID specific MTU values */
561 break;
563 default:
564 /* Raw socket can send/recv signalling messages only */
565 chan->scid = L2CAP_CID_SIGNALING;
566 chan->dcid = L2CAP_CID_SIGNALING;
567 chan->omtu = L2CAP_DEFAULT_MTU;
/* Extended flow spec defaults (best effort service) */
570 chan->local_id = L2CAP_BESTEFFORT_ID;
571 chan->local_stype = L2CAP_SERV_BESTEFFORT;
572 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
573 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
574 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
575 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
577 l2cap_chan_hold(chan);
579 /* Only keep a reference for fixed channels if they requested it */
580 if (chan->chan_type != L2CAP_CHAN_FIXED ||
581 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
582 hci_conn_hold(conn->hcon);
584 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
587 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
589 mutex_lock(&conn->chan_lock);
590 __l2cap_chan_add(conn, chan);
591 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down with error @err:
 * unlink from the connection list, drop the refs taken in
 * __l2cap_chan_add(), disconnect any AMP logical link and purge
 * mode-specific queues/timers.
 */
594 void l2cap_chan_del(struct l2cap_chan *chan, int err)
596 struct l2cap_conn *conn = chan->conn;
598 __clear_chan_timer(chan);
600 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
601 state_to_string(chan->state));
603 chan->ops->teardown(chan, err);
605 if (conn) {
606 struct amp_mgr *mgr = conn->hcon->amp_mgr;
607 /* Delete from channel list */
608 list_del(&chan->list);
610 l2cap_chan_put(chan);
612 chan->conn = NULL;
614 /* Reference was only held for non-fixed channels or
615 * fixed channels that explicitly requested it using the
616 * FLAG_HOLD_HCI_CONN flag.
*/
618 if (chan->chan_type != L2CAP_CHAN_FIXED ||
619 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
620 hci_conn_drop(conn->hcon);
622 if (mgr && mgr->bredr_chan == chan)
623 mgr->bredr_chan = NULL;
626 if (chan->hs_hchan) {
627 struct hci_chan *hs_hchan = chan->hs_hchan;
629 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
630 amp_disconnect_logical_link(hs_hchan);
/* Nothing more to clean up while configuration never completed */
633 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
634 return;
636 switch(chan->mode) {
637 case L2CAP_MODE_BASIC:
638 break;
640 case L2CAP_MODE_LE_FLOWCTL:
641 skb_queue_purge(&chan->tx_q);
642 break;
644 case L2CAP_MODE_ERTM:
645 __clear_retrans_timer(chan);
646 __clear_monitor_timer(chan);
647 __clear_ack_timer(chan);
649 skb_queue_purge(&chan->srej_q);
651 l2cap_seq_list_free(&chan->srej_list);
652 l2cap_seq_list_free(&chan->retrans_list);
654 /* fall through */
656 case L2CAP_MODE_STREAMING:
657 skb_queue_purge(&chan->tx_q);
658 break;
661 return;
663 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Worker: propagate an updated identity address of the HCI connection
 * to the cached destination address/type of every attached channel.
 */
665 static void l2cap_conn_update_id_addr(struct work_struct *work)
667 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
668 id_addr_update_work);
669 struct hci_conn *hcon = conn->hcon;
670 struct l2cap_chan *chan;
672 mutex_lock(&conn->chan_lock);
674 list_for_each_entry(chan, &conn->chan_l, list) {
675 l2cap_chan_lock(chan);
676 bacpy(&chan->dst, &hcon->dst);
677 chan->dst_type = bdaddr_dst_type(hcon);
678 l2cap_chan_unlock(chan);
681 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request: send an LE
 * connect response carrying AUTHORIZATION (deferred setup) or BAD_PSM
 * and move the channel to BT_DISCONN.
 */
684 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
686 struct l2cap_conn *conn = chan->conn;
687 struct l2cap_le_conn_rsp rsp;
688 u16 result;
690 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
691 result = L2CAP_CR_LE_AUTHORIZATION;
692 else
693 result = L2CAP_CR_LE_BAD_PSM;
695 l2cap_state_change(chan, BT_DISCONN);
697 rsp.dcid = cpu_to_le16(chan->scid);
698 rsp.mtu = cpu_to_le16(chan->imtu);
699 rsp.mps = cpu_to_le16(chan->mps);
700 rsp.credits = cpu_to_le16(chan->rx_credits);
701 rsp.result = cpu_to_le16(result);
703 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
704 &rsp);
/* Reject a pending BR/EDR connection request: send a connect response
 * with SEC_BLOCK (deferred setup) or BAD_PSM and move the channel to
 * BT_DISCONN.
 */
707 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
709 struct l2cap_conn *conn = chan->conn;
710 struct l2cap_conn_rsp rsp;
711 u16 result;
713 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
714 result = L2CAP_CR_SEC_BLOCK;
715 else
716 result = L2CAP_CR_BAD_PSM;
718 l2cap_state_change(chan, BT_DISCONN);
720 rsp.scid = cpu_to_le16(chan->dcid);
721 rsp.dcid = cpu_to_le16(chan->scid);
722 rsp.result = cpu_to_le16(result);
723 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
725 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close @chan with @reason, choosing the correct shutdown path for its
 * current state (disconnect request, connect rejection, or immediate
 * teardown).  Caller must hold the channel lock.
 */
728 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
730 struct l2cap_conn *conn = chan->conn;
732 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
734 switch (chan->state) {
735 case BT_LISTEN:
736 chan->ops->teardown(chan, 0);
737 break;
739 case BT_CONNECTED:
740 case BT_CONFIG:
741 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
742 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
743 l2cap_send_disconn_req(chan, reason);
744 } else
745 l2cap_chan_del(chan, reason);
746 break;
748 case BT_CONNECT2:
749 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
750 if (conn->hcon->type == ACL_LINK)
751 l2cap_chan_connect_reject(chan);
752 else if (conn->hcon->type == LE_LINK)
753 l2cap_chan_le_connect_reject(chan);
756 l2cap_chan_del(chan, reason);
757 break;
759 case BT_CONNECT:
760 case BT_DISCONN:
761 l2cap_chan_del(chan, reason);
762 break;
764 default:
765 chan->ops->teardown(chan, 0);
766 break;
769 EXPORT_SYMBOL(l2cap_chan_close);
/* Derive the HCI authentication requirement for @chan from its channel
 * type, PSM and security level.  May upgrade sec_level from LOW to SDP
 * for the SDP/3DSP PSMs as a side effect.
 */
771 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
773 switch (chan->chan_type) {
774 case L2CAP_CHAN_RAW:
775 switch (chan->sec_level) {
776 case BT_SECURITY_HIGH:
777 case BT_SECURITY_FIPS:
778 return HCI_AT_DEDICATED_BONDING_MITM;
779 case BT_SECURITY_MEDIUM:
780 return HCI_AT_DEDICATED_BONDING;
781 default:
782 return HCI_AT_NO_BONDING;
784 break;
785 case L2CAP_CHAN_CONN_LESS:
786 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
787 if (chan->sec_level == BT_SECURITY_LOW)
788 chan->sec_level = BT_SECURITY_SDP;
790 if (chan->sec_level == BT_SECURITY_HIGH ||
791 chan->sec_level == BT_SECURITY_FIPS)
792 return HCI_AT_NO_BONDING_MITM;
793 else
794 return HCI_AT_NO_BONDING;
795 break;
796 case L2CAP_CHAN_CONN_ORIENTED:
797 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
798 if (chan->sec_level == BT_SECURITY_LOW)
799 chan->sec_level = BT_SECURITY_SDP;
801 if (chan->sec_level == BT_SECURITY_HIGH ||
802 chan->sec_level == BT_SECURITY_FIPS)
803 return HCI_AT_NO_BONDING_MITM;
804 else
805 return HCI_AT_NO_BONDING;
807 /* fall through */
808 default:
809 switch (chan->sec_level) {
810 case BT_SECURITY_HIGH:
811 case BT_SECURITY_FIPS:
812 return HCI_AT_GENERAL_BONDING_MITM;
813 case BT_SECURITY_MEDIUM:
814 return HCI_AT_GENERAL_BONDING;
815 default:
816 return HCI_AT_NO_BONDING;
818 break;
/* Service level security */
/* Request the link security level required by @chan.  LE links go via
 * SMP; BR/EDR links via hci_conn_security() with the derived HCI
 * authentication requirement.
 */
823 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
825 struct l2cap_conn *conn = chan->conn;
826 __u8 auth_type;
828 if (conn->hcon->type == LE_LINK)
829 return smp_conn_security(conn->hcon, chan->sec_level);
831 auth_type = l2cap_get_auth_type(chan);
833 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
834 initiator);
/* Allocate the next signalling identifier for @conn, cycling through
 * the kernel-owned range 1-128 under ident_lock.
 */
837 static u8 l2cap_get_ident(struct l2cap_conn *conn)
839 u8 id;
841 /* Get next available identificator.
842 * 1 - 128 are used by kernel.
843 * 129 - 199 are reserved.
844 * 200 - 254 are used by utilities like l2ping, etc.
*/
847 mutex_lock(&conn->ident_lock);
849 if (++conn->tx_ident > 128)
850 conn->tx_ident = 1;
852 id = conn->tx_ident;
854 mutex_unlock(&conn->ident_lock);
856 return id;
/* Build and transmit an L2CAP signalling command on @conn's signalling
 * channel at maximum HCI priority.  Silently drops the command if the
 * skb cannot be built.
 */
859 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
860 void *data)
862 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
863 u8 flags;
865 BT_DBG("code 0x%2.2x", code);
867 if (!skb)
868 return;
870 /* Use NO_FLUSH if supported or we have an LE link (which does
871 * not support auto-flushing packets) */
872 if (lmp_no_flush_capable(conn->hcon->hdev) ||
873 conn->hcon->type == LE_LINK)
874 flags = ACL_START_NO_FLUSH;
875 else
876 flags = ACL_START;
878 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
879 skb->priority = HCI_PRIO_MAX;
881 hci_send_acl(conn->hchan, skb, flags);
884 static bool __chan_is_moving(struct l2cap_chan *chan)
886 return chan->move_state != L2CAP_MOVE_STABLE &&
887 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit @skb on @chan, preferring the high-speed (AMP) channel when
 * one is active and the channel is not mid-move; otherwise send on the
 * BR/EDR or LE ACL link with appropriate flush flags.
 */
890 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
892 struct hci_conn *hcon = chan->conn->hcon;
893 u16 flags;
895 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
896 skb->priority);
898 if (chan->hs_hcon && !__chan_is_moving(chan)) {
899 if (chan->hs_hchan)
900 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
901 else
902 kfree_skb(skb);
904 return;
907 /* Use NO_FLUSH for LE links (where this is the only option) or
908 * if the BR/EDR link supports it and flushing has not been
909 * explicitly requested (through FLAG_FLUSHABLE).
*/
911 if (hcon->type == LE_LINK ||
912 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
913 lmp_no_flush_capable(hcon->hdev)))
914 flags = ACL_START_NO_FLUSH;
915 else
916 flags = ACL_START;
918 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
919 hci_send_acl(chan->conn->hchan, skb, flags);
922 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
924 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
925 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
927 if (enh & L2CAP_CTRL_FRAME_TYPE) {
928 /* S-Frame */
929 control->sframe = 1;
930 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
931 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
933 control->sar = 0;
934 control->txseq = 0;
935 } else {
936 /* I-Frame */
937 control->sframe = 0;
938 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
939 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
941 control->poll = 0;
942 control->super = 0;
946 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
948 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
949 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
951 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
952 /* S-Frame */
953 control->sframe = 1;
954 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
955 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
957 control->sar = 0;
958 control->txseq = 0;
959 } else {
960 /* I-Frame */
961 control->sframe = 0;
962 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
963 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
965 control->poll = 0;
966 control->super = 0;
970 static inline void __unpack_control(struct l2cap_chan *chan,
971 struct sk_buff *skb)
973 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
974 __unpack_extended_control(get_unaligned_le32(skb->data),
975 &bt_cb(skb)->l2cap);
976 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
977 } else {
978 __unpack_enhanced_control(get_unaligned_le16(skb->data),
979 &bt_cb(skb)->l2cap);
980 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
984 static u32 __pack_extended_control(struct l2cap_ctrl *control)
986 u32 packed;
988 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
989 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
991 if (control->sframe) {
992 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
993 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
994 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
995 } else {
996 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
997 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1000 return packed;
1003 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1005 u16 packed;
1007 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1008 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1010 if (control->sframe) {
1011 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1012 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1013 packed |= L2CAP_CTRL_FRAME_TYPE;
1014 } else {
1015 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1016 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1019 return packed;
1022 static inline void __pack_control(struct l2cap_chan *chan,
1023 struct l2cap_ctrl *control,
1024 struct sk_buff *skb)
1026 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1027 put_unaligned_le32(__pack_extended_control(control),
1028 skb->data + L2CAP_HDR_SIZE);
1029 } else {
1030 put_unaligned_le16(__pack_enhanced_control(control),
1031 skb->data + L2CAP_HDR_SIZE);
1035 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1037 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1038 return L2CAP_EXT_HDR_SIZE;
1039 else
1040 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU carrying @control for @chan: basic header,
 * packed control field and optional CRC16 FCS.  Returns the skb or an
 * ERR_PTR on allocation failure.
 */
1043 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1044 u32 control)
1046 struct sk_buff *skb;
1047 struct l2cap_hdr *lh;
1048 int hlen = __ertm_hdr_size(chan);
1050 if (chan->fcs == L2CAP_FCS_CRC16)
1051 hlen += L2CAP_FCS_SIZE;
1053 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1055 if (!skb)
1056 return ERR_PTR(-ENOMEM);
1058 lh = skb_put(skb, L2CAP_HDR_SIZE);
1059 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1060 lh->cid = cpu_to_le16(chan->dcid);
1062 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1063 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1064 else
1065 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1067 if (chan->fcs == L2CAP_FCS_CRC16) {
1068 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1069 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1072 skb->priority = HCI_PRIO_MAX;
1073 return skb;
/* Transmit the supervisory frame described by @control on @chan,
 * updating F-bit, RNR-sent and last-acked bookkeeping first.  Silently
 * ignores non-S-frames and channels that are mid-move.
 */
1076 static void l2cap_send_sframe(struct l2cap_chan *chan,
1077 struct l2cap_ctrl *control)
1079 struct sk_buff *skb;
1080 u32 control_field;
1082 BT_DBG("chan %p, control %p", chan, control)
1084 if (!control->sframe)
1085 return;
1087 if (__chan_is_moving(chan))
1088 return;
/* A pending F-bit is carried on any non-poll S-frame */
1090 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1091 !control->poll)
1092 control->final = 1;
1094 if (control->super == L2CAP_SUPER_RR)
1095 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1096 else if (control->super == L2CAP_SUPER_RNR)
1097 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* Any frame except SREJ acknowledges up to reqseq */
1099 if (control->super != L2CAP_SUPER_SREJ) {
1100 chan->last_acked_seq = control->reqseq;
1101 __clear_ack_timer(chan);
1104 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1105 control->final, control->poll, control->super);
1107 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1108 control_field = __pack_extended_control(control);
1109 else
1110 control_field = __pack_enhanced_control(control);
1112 skb = l2cap_create_sframe_pdu(chan, control_field);
1113 if (!IS_ERR(skb))
1114 l2cap_do_send(chan, skb);
1117 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1119 struct l2cap_ctrl control;
1121 BT_DBG("chan %p, poll %d", chan, poll);
1123 memset(&control, 0, sizeof(control));
1124 control.sframe = 1;
1125 control.poll = poll;
1127 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1128 control.super = L2CAP_SUPER_RNR;
1129 else
1130 control.super = L2CAP_SUPER_RR;
1132 control.reqseq = chan->buffer_seq;
1133 l2cap_send_sframe(chan, &control);
1136 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1138 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1139 return true;
1141 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* True when an AMP move is possible and preferred for @chan: both
 * sides advertise A2MP, at least one non-BR/EDR AMP controller is up,
 * and the channel policy prefers AMP.
 */
1144 static bool __amp_capable(struct l2cap_chan *chan)
1146 struct l2cap_conn *conn = chan->conn;
1147 struct hci_dev *hdev;
1148 bool amp_available = false;
1150 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1151 return false;
1153 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1154 return false;
1156 read_lock(&hci_dev_list_lock);
1157 list_for_each_entry(hdev, &hci_dev_list, list) {
1158 if (hdev->amp_type != AMP_TYPE_BREDR &&
1159 test_bit(HCI_UP, &hdev->flags)) {
1160 amp_available = true;
1161 break;
1164 read_unlock(&hci_dev_list_lock);
1166 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1167 return amp_available;
1169 return false;
/* Validate EFS parameters for @chan.  Currently a stub that accepts
 * everything.
 */
1172 static bool l2cap_check_efs(struct l2cap_chan *chan)
1174 /* Check EFS parameters */
1175 return true;
/* Send an L2CAP connection request for @chan, allocating a fresh
 * signalling identifier and marking the connect as pending.
 */
1178 void l2cap_send_conn_req(struct l2cap_chan *chan)
1180 struct l2cap_conn *conn = chan->conn;
1181 struct l2cap_conn_req req;
1183 req.scid = cpu_to_le16(chan->scid);
1184 req.psm = chan->psm;
1186 chan->ident = l2cap_get_ident(conn);
1188 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1190 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an AMP create-channel request for @chan on controller @amp_id. */
1193 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1195 struct l2cap_create_chan_req req;
1196 req.scid = cpu_to_le16(chan->scid);
1197 req.psm = chan->psm;
1198 req.amp_id = amp_id;
1200 chan->ident = l2cap_get_ident(chan->conn);
1202 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1203 sizeof(req), &req);
/* Prepare an ERTM channel for a move between controllers: stop timers,
 * mark recently-sent frames for retransmission, and reset SREJ state.
 * No-op for non-ERTM channels.
 */
1206 static void l2cap_move_setup(struct l2cap_chan *chan)
1208 struct sk_buff *skb;
1210 BT_DBG("chan %p", chan);
1212 if (chan->mode != L2CAP_MODE_ERTM)
1213 return;
1215 __clear_retrans_timer(chan);
1216 __clear_monitor_timer(chan);
1217 __clear_ack_timer(chan);
1219 chan->retry_count = 0;
/* Reset the retry count of already-sent frames so they go out again */
1220 skb_queue_walk(&chan->tx_q, skb) {
1221 if (bt_cb(skb)->l2cap.retries)
1222 bt_cb(skb)->l2cap.retries = 1;
1223 else
1224 break;
1227 chan->expected_tx_seq = chan->buffer_seq;
1229 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1230 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1231 l2cap_seq_list_clear(&chan->retrans_list);
1232 l2cap_seq_list_clear(&chan->srej_list);
1233 skb_queue_purge(&chan->srej_q);
1235 chan->tx_state = L2CAP_TX_STATE_XMIT;
1236 chan->rx_state = L2CAP_RX_STATE_MOVE;
1238 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish a channel move: return move state to stable and, for ERTM
 * channels, enter the RX state matching the role we played in the move.
 */
1241 static void l2cap_move_done(struct l2cap_chan *chan)
1243 u8 move_role = chan->move_role;
1244 BT_DBG("chan %p", chan);
1246 chan->move_state = L2CAP_MOVE_STABLE;
1247 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1249 if (chan->mode != L2CAP_MODE_ERTM)
1250 return;
1252 switch (move_role) {
1253 case L2CAP_MOVE_ROLE_INITIATOR:
1254 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1255 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1256 break;
1257 case L2CAP_MOVE_ROLE_RESPONDER:
1258 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1259 break;
1263 static void l2cap_chan_ready(struct l2cap_chan *chan)
1265 /* The channel may have already been flagged as connected in
1266 * case of receiving data before the L2CAP info req/rsp
1267 * procedure is complete.
1269 if (chan->state == BT_CONNECTED)
1270 return;
1272 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1273 chan->conf_state = 0;
1274 __clear_chan_timer(chan);
1276 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1277 chan->ops->suspend(chan);
1279 chan->state = BT_CONNECTED;
1281 chan->ops->ready(chan);
1284 static void l2cap_le_connect(struct l2cap_chan *chan)
1286 struct l2cap_conn *conn = chan->conn;
1287 struct l2cap_le_conn_req req;
1289 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1290 return;
1292 if (!chan->imtu)
1293 chan->imtu = chan->conn->mtu;
1295 l2cap_le_flowctl_init(chan, 0);
1297 req.psm = chan->psm;
1298 req.scid = cpu_to_le16(chan->scid);
1299 req.mtu = cpu_to_le16(chan->imtu);
1300 req.mps = cpu_to_le16(chan->mps);
1301 req.credits = cpu_to_le16(chan->rx_credits);
1303 chan->ident = l2cap_get_ident(conn);
1305 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1306 sizeof(req), &req);
1309 static void l2cap_le_start(struct l2cap_chan *chan)
1311 struct l2cap_conn *conn = chan->conn;
1313 if (!smp_conn_security(conn->hcon, chan->sec_level))
1314 return;
1316 if (!chan->psm) {
1317 l2cap_chan_ready(chan);
1318 return;
1321 if (chan->state == BT_CONNECT)
1322 l2cap_le_connect(chan);
1325 static void l2cap_start_connection(struct l2cap_chan *chan)
1327 if (__amp_capable(chan)) {
1328 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1329 a2mp_discover_amp(chan);
1330 } else if (chan->conn->hcon->type == LE_LINK) {
1331 l2cap_le_start(chan);
1332 } else {
1333 l2cap_send_conn_req(chan);
1337 static void l2cap_request_info(struct l2cap_conn *conn)
1339 struct l2cap_info_req req;
1341 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1342 return;
1344 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1346 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1347 conn->info_ident = l2cap_get_ident(conn);
1349 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1351 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1352 sizeof(req), &req);
1355 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1357 /* The minimum encryption key size needs to be enforced by the
1358 * host stack before establishing any L2CAP connections. The
1359 * specification in theory allows a minimum of 1, but to align
1360 * BR/EDR and LE transports, a minimum of 7 is chosen.
1362 * This check might also be called for unencrypted connections
1363 * that have no key size requirements. Ensure that the link is
1364 * actually encrypted before enforcing a key size.
1366 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1367 hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1370 static void l2cap_do_start(struct l2cap_chan *chan)
1372 struct l2cap_conn *conn = chan->conn;
1374 if (conn->hcon->type == LE_LINK) {
1375 l2cap_le_start(chan);
1376 return;
1379 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1380 l2cap_request_info(conn);
1381 return;
1384 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1385 return;
1387 if (!l2cap_chan_check_security(chan, true) ||
1388 !__l2cap_no_conn_pending(chan))
1389 return;
1391 if (l2cap_check_enc_key_size(conn->hcon))
1392 l2cap_start_connection(chan);
1393 else
1394 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1397 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1399 u32 local_feat_mask = l2cap_feat_mask;
1400 if (!disable_ertm)
1401 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1403 switch (mode) {
1404 case L2CAP_MODE_ERTM:
1405 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1406 case L2CAP_MODE_STREAMING:
1407 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1408 default:
1409 return 0x00;
1413 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1415 struct l2cap_conn *conn = chan->conn;
1416 struct l2cap_disconn_req req;
1418 if (!conn)
1419 return;
1421 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1422 __clear_retrans_timer(chan);
1423 __clear_monitor_timer(chan);
1424 __clear_ack_timer(chan);
1427 if (chan->scid == L2CAP_CID_A2MP) {
1428 l2cap_state_change(chan, BT_DISCONN);
1429 return;
1432 req.dcid = cpu_to_le16(chan->dcid);
1433 req.scid = cpu_to_le16(chan->scid);
1434 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1435 sizeof(req), &req);
1437 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1440 /* ---- L2CAP connections ---- */
1441 static void l2cap_conn_start(struct l2cap_conn *conn)
1443 struct l2cap_chan *chan, *tmp;
1445 BT_DBG("conn %p", conn);
1447 mutex_lock(&conn->chan_lock);
1449 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1450 l2cap_chan_lock(chan);
1452 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1453 l2cap_chan_ready(chan);
1454 l2cap_chan_unlock(chan);
1455 continue;
1458 if (chan->state == BT_CONNECT) {
1459 if (!l2cap_chan_check_security(chan, true) ||
1460 !__l2cap_no_conn_pending(chan)) {
1461 l2cap_chan_unlock(chan);
1462 continue;
1465 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1466 && test_bit(CONF_STATE2_DEVICE,
1467 &chan->conf_state)) {
1468 l2cap_chan_close(chan, ECONNRESET);
1469 l2cap_chan_unlock(chan);
1470 continue;
1473 if (l2cap_check_enc_key_size(conn->hcon))
1474 l2cap_start_connection(chan);
1475 else
1476 l2cap_chan_close(chan, ECONNREFUSED);
1478 } else if (chan->state == BT_CONNECT2) {
1479 struct l2cap_conn_rsp rsp;
1480 char buf[128];
1481 rsp.scid = cpu_to_le16(chan->dcid);
1482 rsp.dcid = cpu_to_le16(chan->scid);
1484 if (l2cap_chan_check_security(chan, false)) {
1485 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1486 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1487 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1488 chan->ops->defer(chan);
1490 } else {
1491 l2cap_state_change(chan, BT_CONFIG);
1492 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1493 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1495 } else {
1496 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1497 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1500 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1501 sizeof(rsp), &rsp);
1503 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1504 rsp.result != L2CAP_CR_SUCCESS) {
1505 l2cap_chan_unlock(chan);
1506 continue;
1509 set_bit(CONF_REQ_SENT, &chan->conf_state);
1510 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1511 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1512 chan->num_conf_req++;
1515 l2cap_chan_unlock(chan);
1518 mutex_unlock(&conn->chan_lock);
1521 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1523 struct hci_conn *hcon = conn->hcon;
1524 struct hci_dev *hdev = hcon->hdev;
1526 BT_DBG("%s conn %p", hdev->name, conn);
1528 /* For outgoing pairing which doesn't necessarily have an
1529 * associated socket (e.g. mgmt_pair_device).
1531 if (hcon->out)
1532 smp_conn_security(hcon, hcon->pending_sec_level);
1534 /* For LE slave connections, make sure the connection interval
1535 * is in the range of the minium and maximum interval that has
1536 * been configured for this connection. If not, then trigger
1537 * the connection update procedure.
1539 if (hcon->role == HCI_ROLE_SLAVE &&
1540 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1541 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1542 struct l2cap_conn_param_update_req req;
1544 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1545 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1546 req.latency = cpu_to_le16(hcon->le_conn_latency);
1547 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1549 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1550 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1554 static void l2cap_conn_ready(struct l2cap_conn *conn)
1556 struct l2cap_chan *chan;
1557 struct hci_conn *hcon = conn->hcon;
1559 BT_DBG("conn %p", conn);
1561 if (hcon->type == ACL_LINK)
1562 l2cap_request_info(conn);
1564 mutex_lock(&conn->chan_lock);
1566 list_for_each_entry(chan, &conn->chan_l, list) {
1568 l2cap_chan_lock(chan);
1570 if (chan->scid == L2CAP_CID_A2MP) {
1571 l2cap_chan_unlock(chan);
1572 continue;
1575 if (hcon->type == LE_LINK) {
1576 l2cap_le_start(chan);
1577 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1578 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1579 l2cap_chan_ready(chan);
1580 } else if (chan->state == BT_CONNECT) {
1581 l2cap_do_start(chan);
1584 l2cap_chan_unlock(chan);
1587 mutex_unlock(&conn->chan_lock);
1589 if (hcon->type == LE_LINK)
1590 l2cap_le_conn_ready(conn);
1592 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1595 /* Notify sockets that we cannot guaranty reliability anymore */
1596 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1598 struct l2cap_chan *chan;
1600 BT_DBG("conn %p", conn);
1602 mutex_lock(&conn->chan_lock);
1604 list_for_each_entry(chan, &conn->chan_l, list) {
1605 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1606 l2cap_chan_set_err(chan, err);
1609 mutex_unlock(&conn->chan_lock);
1612 static void l2cap_info_timeout(struct work_struct *work)
1614 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1615 info_timer.work);
1617 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1618 conn->info_ident = 0;
1620 l2cap_conn_start(conn);
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can be unregistered either explicitly or implicitly
 * when the underlying l2cap_conn object is deleted. This guarantees that
 * l2cap->hcon, l2cap->hchan, .. are valid as long as the remove callback
 * hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they
 * intend to call l2cap_unregister_user(). The l2cap_conn object might get
 * destroyed at any time if they don't.
 */
1636 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1638 struct hci_dev *hdev = conn->hcon->hdev;
1639 int ret;
1641 /* We need to check whether l2cap_conn is registered. If it is not, we
1642 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1643 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1644 * relies on the parent hci_conn object to be locked. This itself relies
1645 * on the hci_dev object to be locked. So we must lock the hci device
1646 * here, too. */
1648 hci_dev_lock(hdev);
1650 if (!list_empty(&user->list)) {
1651 ret = -EINVAL;
1652 goto out_unlock;
1655 /* conn->hchan is NULL after l2cap_conn_del() was called */
1656 if (!conn->hchan) {
1657 ret = -ENODEV;
1658 goto out_unlock;
1661 ret = user->probe(conn, user);
1662 if (ret)
1663 goto out_unlock;
1665 list_add(&user->list, &conn->users);
1666 ret = 0;
1668 out_unlock:
1669 hci_dev_unlock(hdev);
1670 return ret;
1672 EXPORT_SYMBOL(l2cap_register_user);
1674 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1676 struct hci_dev *hdev = conn->hcon->hdev;
1678 hci_dev_lock(hdev);
1680 if (list_empty(&user->list))
1681 goto out_unlock;
1683 list_del_init(&user->list);
1684 user->remove(conn, user);
1686 out_unlock:
1687 hci_dev_unlock(hdev);
1689 EXPORT_SYMBOL(l2cap_unregister_user);
1691 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1693 struct l2cap_user *user;
1695 while (!list_empty(&conn->users)) {
1696 user = list_first_entry(&conn->users, struct l2cap_user, list);
1697 list_del_init(&user->list);
1698 user->remove(conn, user);
1702 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1704 struct l2cap_conn *conn = hcon->l2cap_data;
1705 struct l2cap_chan *chan, *l;
1707 if (!conn)
1708 return;
1710 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1712 kfree_skb(conn->rx_skb);
1714 skb_queue_purge(&conn->pending_rx);
1716 /* We can not call flush_work(&conn->pending_rx_work) here since we
1717 * might block if we are running on a worker from the same workqueue
1718 * pending_rx_work is waiting on.
1720 if (work_pending(&conn->pending_rx_work))
1721 cancel_work_sync(&conn->pending_rx_work);
1723 if (work_pending(&conn->id_addr_update_work))
1724 cancel_work_sync(&conn->id_addr_update_work);
1726 l2cap_unregister_all_users(conn);
1728 /* Force the connection to be immediately dropped */
1729 hcon->disc_timeout = 0;
1731 mutex_lock(&conn->chan_lock);
1733 /* Kill channels */
1734 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1735 l2cap_chan_hold(chan);
1736 l2cap_chan_lock(chan);
1738 l2cap_chan_del(chan, err);
1740 l2cap_chan_unlock(chan);
1742 chan->ops->close(chan);
1743 l2cap_chan_put(chan);
1746 mutex_unlock(&conn->chan_lock);
1748 hci_chan_del(conn->hchan);
1750 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1751 cancel_delayed_work_sync(&conn->info_timer);
1753 hcon->l2cap_data = NULL;
1754 conn->hchan = NULL;
1755 l2cap_conn_put(conn);
1758 static void l2cap_conn_free(struct kref *ref)
1760 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1762 hci_conn_put(conn->hcon);
1763 kfree(conn);
1766 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1768 kref_get(&conn->ref);
1769 return conn;
1771 EXPORT_SYMBOL(l2cap_conn_get);
1773 void l2cap_conn_put(struct l2cap_conn *conn)
1775 kref_put(&conn->ref, l2cap_conn_free);
1777 EXPORT_SYMBOL(l2cap_conn_put);
1779 /* ---- Socket interface ---- */
1781 /* Find socket with psm and source / destination bdaddr.
1782 * Returns closest match.
1784 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1785 bdaddr_t *src,
1786 bdaddr_t *dst,
1787 u8 link_type)
1789 struct l2cap_chan *c, *c1 = NULL;
1791 read_lock(&chan_list_lock);
1793 list_for_each_entry(c, &chan_list, global_l) {
1794 if (state && c->state != state)
1795 continue;
1797 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1798 continue;
1800 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1801 continue;
1803 if (c->psm == psm) {
1804 int src_match, dst_match;
1805 int src_any, dst_any;
1807 /* Exact match. */
1808 src_match = !bacmp(&c->src, src);
1809 dst_match = !bacmp(&c->dst, dst);
1810 if (src_match && dst_match) {
1811 l2cap_chan_hold(c);
1812 read_unlock(&chan_list_lock);
1813 return c;
1816 /* Closest match */
1817 src_any = !bacmp(&c->src, BDADDR_ANY);
1818 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1819 if ((src_match && dst_any) || (src_any && dst_match) ||
1820 (src_any && dst_any))
1821 c1 = c;
1825 if (c1)
1826 l2cap_chan_hold(c1);
1828 read_unlock(&chan_list_lock);
1830 return c1;
1833 static void l2cap_monitor_timeout(struct work_struct *work)
1835 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1836 monitor_timer.work);
1838 BT_DBG("chan %p", chan);
1840 l2cap_chan_lock(chan);
1842 if (!chan->conn) {
1843 l2cap_chan_unlock(chan);
1844 l2cap_chan_put(chan);
1845 return;
1848 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1850 l2cap_chan_unlock(chan);
1851 l2cap_chan_put(chan);
1854 static void l2cap_retrans_timeout(struct work_struct *work)
1856 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1857 retrans_timer.work);
1859 BT_DBG("chan %p", chan);
1861 l2cap_chan_lock(chan);
1863 if (!chan->conn) {
1864 l2cap_chan_unlock(chan);
1865 l2cap_chan_put(chan);
1866 return;
1869 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1870 l2cap_chan_unlock(chan);
1871 l2cap_chan_put(chan);
1874 static void l2cap_streaming_send(struct l2cap_chan *chan,
1875 struct sk_buff_head *skbs)
1877 struct sk_buff *skb;
1878 struct l2cap_ctrl *control;
1880 BT_DBG("chan %p, skbs %p", chan, skbs);
1882 if (__chan_is_moving(chan))
1883 return;
1885 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1887 while (!skb_queue_empty(&chan->tx_q)) {
1889 skb = skb_dequeue(&chan->tx_q);
1891 bt_cb(skb)->l2cap.retries = 1;
1892 control = &bt_cb(skb)->l2cap;
1894 control->reqseq = 0;
1895 control->txseq = chan->next_tx_seq;
1897 __pack_control(chan, control, skb);
1899 if (chan->fcs == L2CAP_FCS_CRC16) {
1900 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1901 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1904 l2cap_do_send(chan, skb);
1906 BT_DBG("Sent txseq %u", control->txseq);
1908 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1909 chan->frames_sent++;
1913 static int l2cap_ertm_send(struct l2cap_chan *chan)
1915 struct sk_buff *skb, *tx_skb;
1916 struct l2cap_ctrl *control;
1917 int sent = 0;
1919 BT_DBG("chan %p", chan);
1921 if (chan->state != BT_CONNECTED)
1922 return -ENOTCONN;
1924 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1925 return 0;
1927 if (__chan_is_moving(chan))
1928 return 0;
1930 while (chan->tx_send_head &&
1931 chan->unacked_frames < chan->remote_tx_win &&
1932 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1934 skb = chan->tx_send_head;
1936 bt_cb(skb)->l2cap.retries = 1;
1937 control = &bt_cb(skb)->l2cap;
1939 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1940 control->final = 1;
1942 control->reqseq = chan->buffer_seq;
1943 chan->last_acked_seq = chan->buffer_seq;
1944 control->txseq = chan->next_tx_seq;
1946 __pack_control(chan, control, skb);
1948 if (chan->fcs == L2CAP_FCS_CRC16) {
1949 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1950 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1953 /* Clone after data has been modified. Data is assumed to be
1954 read-only (for locking purposes) on cloned sk_buffs.
1956 tx_skb = skb_clone(skb, GFP_KERNEL);
1958 if (!tx_skb)
1959 break;
1961 __set_retrans_timer(chan);
1963 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1964 chan->unacked_frames++;
1965 chan->frames_sent++;
1966 sent++;
1968 if (skb_queue_is_last(&chan->tx_q, skb))
1969 chan->tx_send_head = NULL;
1970 else
1971 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1973 l2cap_do_send(chan, tx_skb);
1974 BT_DBG("Sent txseq %u", control->txseq);
1977 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1978 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1980 return sent;
1983 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1985 struct l2cap_ctrl control;
1986 struct sk_buff *skb;
1987 struct sk_buff *tx_skb;
1988 u16 seq;
1990 BT_DBG("chan %p", chan);
1992 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1993 return;
1995 if (__chan_is_moving(chan))
1996 return;
1998 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1999 seq = l2cap_seq_list_pop(&chan->retrans_list);
2001 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2002 if (!skb) {
2003 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2004 seq);
2005 continue;
2008 bt_cb(skb)->l2cap.retries++;
2009 control = bt_cb(skb)->l2cap;
2011 if (chan->max_tx != 0 &&
2012 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2013 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2014 l2cap_send_disconn_req(chan, ECONNRESET);
2015 l2cap_seq_list_clear(&chan->retrans_list);
2016 break;
2019 control.reqseq = chan->buffer_seq;
2020 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2021 control.final = 1;
2022 else
2023 control.final = 0;
2025 if (skb_cloned(skb)) {
2026 /* Cloned sk_buffs are read-only, so we need a
2027 * writeable copy
2029 tx_skb = skb_copy(skb, GFP_KERNEL);
2030 } else {
2031 tx_skb = skb_clone(skb, GFP_KERNEL);
2034 if (!tx_skb) {
2035 l2cap_seq_list_clear(&chan->retrans_list);
2036 break;
2039 /* Update skb contents */
2040 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2041 put_unaligned_le32(__pack_extended_control(&control),
2042 tx_skb->data + L2CAP_HDR_SIZE);
2043 } else {
2044 put_unaligned_le16(__pack_enhanced_control(&control),
2045 tx_skb->data + L2CAP_HDR_SIZE);
2048 /* Update FCS */
2049 if (chan->fcs == L2CAP_FCS_CRC16) {
2050 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2051 tx_skb->len - L2CAP_FCS_SIZE);
2052 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2053 L2CAP_FCS_SIZE);
2056 l2cap_do_send(chan, tx_skb);
2058 BT_DBG("Resent txseq %d", control.txseq);
2060 chan->last_acked_seq = chan->buffer_seq;
2064 static void l2cap_retransmit(struct l2cap_chan *chan,
2065 struct l2cap_ctrl *control)
2067 BT_DBG("chan %p, control %p", chan, control);
2069 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2070 l2cap_ertm_resend(chan);
2073 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2074 struct l2cap_ctrl *control)
2076 struct sk_buff *skb;
2078 BT_DBG("chan %p, control %p", chan, control);
2080 if (control->poll)
2081 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2083 l2cap_seq_list_clear(&chan->retrans_list);
2085 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2086 return;
2088 if (chan->unacked_frames) {
2089 skb_queue_walk(&chan->tx_q, skb) {
2090 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2091 skb == chan->tx_send_head)
2092 break;
2095 skb_queue_walk_from(&chan->tx_q, skb) {
2096 if (skb == chan->tx_send_head)
2097 break;
2099 l2cap_seq_list_append(&chan->retrans_list,
2100 bt_cb(skb)->l2cap.txseq);
2103 l2cap_ertm_resend(chan);
2107 static void l2cap_send_ack(struct l2cap_chan *chan)
2109 struct l2cap_ctrl control;
2110 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2111 chan->last_acked_seq);
2112 int threshold;
2114 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2115 chan, chan->last_acked_seq, chan->buffer_seq);
2117 memset(&control, 0, sizeof(control));
2118 control.sframe = 1;
2120 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2121 chan->rx_state == L2CAP_RX_STATE_RECV) {
2122 __clear_ack_timer(chan);
2123 control.super = L2CAP_SUPER_RNR;
2124 control.reqseq = chan->buffer_seq;
2125 l2cap_send_sframe(chan, &control);
2126 } else {
2127 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2128 l2cap_ertm_send(chan);
2129 /* If any i-frames were sent, they included an ack */
2130 if (chan->buffer_seq == chan->last_acked_seq)
2131 frames_to_ack = 0;
2134 /* Ack now if the window is 3/4ths full.
2135 * Calculate without mul or div
2137 threshold = chan->ack_win;
2138 threshold += threshold << 1;
2139 threshold >>= 2;
2141 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2142 threshold);
2144 if (frames_to_ack >= threshold) {
2145 __clear_ack_timer(chan);
2146 control.super = L2CAP_SUPER_RR;
2147 control.reqseq = chan->buffer_seq;
2148 l2cap_send_sframe(chan, &control);
2149 frames_to_ack = 0;
2152 if (frames_to_ack)
2153 __set_ack_timer(chan);
2157 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2158 struct msghdr *msg, int len,
2159 int count, struct sk_buff *skb)
2161 struct l2cap_conn *conn = chan->conn;
2162 struct sk_buff **frag;
2163 int sent = 0;
2165 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2166 return -EFAULT;
2168 sent += count;
2169 len -= count;
2171 /* Continuation fragments (no L2CAP header) */
2172 frag = &skb_shinfo(skb)->frag_list;
2173 while (len) {
2174 struct sk_buff *tmp;
2176 count = min_t(unsigned int, conn->mtu, len);
2178 tmp = chan->ops->alloc_skb(chan, 0, count,
2179 msg->msg_flags & MSG_DONTWAIT);
2180 if (IS_ERR(tmp))
2181 return PTR_ERR(tmp);
2183 *frag = tmp;
2185 if (!copy_from_iter_full(skb_put(*frag, count), count,
2186 &msg->msg_iter))
2187 return -EFAULT;
2189 sent += count;
2190 len -= count;
2192 skb->len += (*frag)->len;
2193 skb->data_len += (*frag)->len;
2195 frag = &(*frag)->next;
2198 return sent;
2201 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2202 struct msghdr *msg, size_t len)
2204 struct l2cap_conn *conn = chan->conn;
2205 struct sk_buff *skb;
2206 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2207 struct l2cap_hdr *lh;
2209 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2210 __le16_to_cpu(chan->psm), len);
2212 count = min_t(unsigned int, (conn->mtu - hlen), len);
2214 skb = chan->ops->alloc_skb(chan, hlen, count,
2215 msg->msg_flags & MSG_DONTWAIT);
2216 if (IS_ERR(skb))
2217 return skb;
2219 /* Create L2CAP header */
2220 lh = skb_put(skb, L2CAP_HDR_SIZE);
2221 lh->cid = cpu_to_le16(chan->dcid);
2222 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2223 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2225 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2226 if (unlikely(err < 0)) {
2227 kfree_skb(skb);
2228 return ERR_PTR(err);
2230 return skb;
2233 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2234 struct msghdr *msg, size_t len)
2236 struct l2cap_conn *conn = chan->conn;
2237 struct sk_buff *skb;
2238 int err, count;
2239 struct l2cap_hdr *lh;
2241 BT_DBG("chan %p len %zu", chan, len);
2243 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2245 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2246 msg->msg_flags & MSG_DONTWAIT);
2247 if (IS_ERR(skb))
2248 return skb;
2250 /* Create L2CAP header */
2251 lh = skb_put(skb, L2CAP_HDR_SIZE);
2252 lh->cid = cpu_to_le16(chan->dcid);
2253 lh->len = cpu_to_le16(len);
2255 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2256 if (unlikely(err < 0)) {
2257 kfree_skb(skb);
2258 return ERR_PTR(err);
2260 return skb;
2263 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2264 struct msghdr *msg, size_t len,
2265 u16 sdulen)
2267 struct l2cap_conn *conn = chan->conn;
2268 struct sk_buff *skb;
2269 int err, count, hlen;
2270 struct l2cap_hdr *lh;
2272 BT_DBG("chan %p len %zu", chan, len);
2274 if (!conn)
2275 return ERR_PTR(-ENOTCONN);
2277 hlen = __ertm_hdr_size(chan);
2279 if (sdulen)
2280 hlen += L2CAP_SDULEN_SIZE;
2282 if (chan->fcs == L2CAP_FCS_CRC16)
2283 hlen += L2CAP_FCS_SIZE;
2285 count = min_t(unsigned int, (conn->mtu - hlen), len);
2287 skb = chan->ops->alloc_skb(chan, hlen, count,
2288 msg->msg_flags & MSG_DONTWAIT);
2289 if (IS_ERR(skb))
2290 return skb;
2292 /* Create L2CAP header */
2293 lh = skb_put(skb, L2CAP_HDR_SIZE);
2294 lh->cid = cpu_to_le16(chan->dcid);
2295 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2297 /* Control header is populated later */
2298 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2299 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2300 else
2301 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2303 if (sdulen)
2304 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2306 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2307 if (unlikely(err < 0)) {
2308 kfree_skb(skb);
2309 return ERR_PTR(err);
2312 bt_cb(skb)->l2cap.fcs = chan->fcs;
2313 bt_cb(skb)->l2cap.retries = 0;
2314 return skb;
2317 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2318 struct sk_buff_head *seg_queue,
2319 struct msghdr *msg, size_t len)
2321 struct sk_buff *skb;
2322 u16 sdu_len;
2323 size_t pdu_len;
2324 u8 sar;
2326 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2328 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2329 * so fragmented skbs are not used. The HCI layer's handling
2330 * of fragmented skbs is not compatible with ERTM's queueing.
2333 /* PDU size is derived from the HCI MTU */
2334 pdu_len = chan->conn->mtu;
2336 /* Constrain PDU size for BR/EDR connections */
2337 if (!chan->hs_hcon)
2338 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2340 /* Adjust for largest possible L2CAP overhead. */
2341 if (chan->fcs)
2342 pdu_len -= L2CAP_FCS_SIZE;
2344 pdu_len -= __ertm_hdr_size(chan);
2346 /* Remote device may have requested smaller PDUs */
2347 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2349 if (len <= pdu_len) {
2350 sar = L2CAP_SAR_UNSEGMENTED;
2351 sdu_len = 0;
2352 pdu_len = len;
2353 } else {
2354 sar = L2CAP_SAR_START;
2355 sdu_len = len;
2358 while (len > 0) {
2359 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2361 if (IS_ERR(skb)) {
2362 __skb_queue_purge(seg_queue);
2363 return PTR_ERR(skb);
2366 bt_cb(skb)->l2cap.sar = sar;
2367 __skb_queue_tail(seg_queue, skb);
2369 len -= pdu_len;
2370 if (sdu_len)
2371 sdu_len = 0;
2373 if (len <= pdu_len) {
2374 sar = L2CAP_SAR_END;
2375 pdu_len = len;
2376 } else {
2377 sar = L2CAP_SAR_CONTINUE;
2381 return 0;
2384 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2385 struct msghdr *msg,
2386 size_t len, u16 sdulen)
2388 struct l2cap_conn *conn = chan->conn;
2389 struct sk_buff *skb;
2390 int err, count, hlen;
2391 struct l2cap_hdr *lh;
2393 BT_DBG("chan %p len %zu", chan, len);
2395 if (!conn)
2396 return ERR_PTR(-ENOTCONN);
2398 hlen = L2CAP_HDR_SIZE;
2400 if (sdulen)
2401 hlen += L2CAP_SDULEN_SIZE;
2403 count = min_t(unsigned int, (conn->mtu - hlen), len);
2405 skb = chan->ops->alloc_skb(chan, hlen, count,
2406 msg->msg_flags & MSG_DONTWAIT);
2407 if (IS_ERR(skb))
2408 return skb;
2410 /* Create L2CAP header */
2411 lh = skb_put(skb, L2CAP_HDR_SIZE);
2412 lh->cid = cpu_to_le16(chan->dcid);
2413 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2415 if (sdulen)
2416 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2418 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2419 if (unlikely(err < 0)) {
2420 kfree_skb(skb);
2421 return ERR_PTR(err);
2424 return skb;
2427 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2428 struct sk_buff_head *seg_queue,
2429 struct msghdr *msg, size_t len)
2431 struct sk_buff *skb;
2432 size_t pdu_len;
2433 u16 sdu_len;
2435 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2437 sdu_len = len;
2438 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2440 while (len > 0) {
2441 if (len <= pdu_len)
2442 pdu_len = len;
2444 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2445 if (IS_ERR(skb)) {
2446 __skb_queue_purge(seg_queue);
2447 return PTR_ERR(skb);
2450 __skb_queue_tail(seg_queue, skb);
2452 len -= pdu_len;
2454 if (sdu_len) {
2455 sdu_len = 0;
2456 pdu_len += L2CAP_SDULEN_SIZE;
2460 return 0;
2463 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2465 int sent = 0;
2467 BT_DBG("chan %p", chan);
2469 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2470 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2471 chan->tx_credits--;
2472 sent++;
2475 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2476 skb_queue_len(&chan->tx_q));
2479 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2481 struct sk_buff *skb;
2482 int err;
2483 struct sk_buff_head seg_queue;
2485 if (!chan->conn)
2486 return -ENOTCONN;
2488 /* Connectionless channel */
2489 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2490 skb = l2cap_create_connless_pdu(chan, msg, len);
2491 if (IS_ERR(skb))
2492 return PTR_ERR(skb);
2494 /* Channel lock is released before requesting new skb and then
2495 * reacquired thus we need to recheck channel state.
2497 if (chan->state != BT_CONNECTED) {
2498 kfree_skb(skb);
2499 return -ENOTCONN;
2502 l2cap_do_send(chan, skb);
2503 return len;
2506 switch (chan->mode) {
2507 case L2CAP_MODE_LE_FLOWCTL:
2508 /* Check outgoing MTU */
2509 if (len > chan->omtu)
2510 return -EMSGSIZE;
2512 __skb_queue_head_init(&seg_queue);
2514 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2516 if (chan->state != BT_CONNECTED) {
2517 __skb_queue_purge(&seg_queue);
2518 err = -ENOTCONN;
2521 if (err)
2522 return err;
2524 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2526 l2cap_le_flowctl_send(chan);
2528 if (!chan->tx_credits)
2529 chan->ops->suspend(chan);
2531 err = len;
2533 break;
2535 case L2CAP_MODE_BASIC:
2536 /* Check outgoing MTU */
2537 if (len > chan->omtu)
2538 return -EMSGSIZE;
2540 /* Create a basic PDU */
2541 skb = l2cap_create_basic_pdu(chan, msg, len);
2542 if (IS_ERR(skb))
2543 return PTR_ERR(skb);
2545 /* Channel lock is released before requesting new skb and then
2546 * reacquired thus we need to recheck channel state.
2548 if (chan->state != BT_CONNECTED) {
2549 kfree_skb(skb);
2550 return -ENOTCONN;
2553 l2cap_do_send(chan, skb);
2554 err = len;
2555 break;
2557 case L2CAP_MODE_ERTM:
2558 case L2CAP_MODE_STREAMING:
2559 /* Check outgoing MTU */
2560 if (len > chan->omtu) {
2561 err = -EMSGSIZE;
2562 break;
2565 __skb_queue_head_init(&seg_queue);
2567 /* Do segmentation before calling in to the state machine,
2568 * since it's possible to block while waiting for memory
2569 * allocation.
2571 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2573 /* The channel could have been closed while segmenting,
2574 * check that it is still connected.
2576 if (chan->state != BT_CONNECTED) {
2577 __skb_queue_purge(&seg_queue);
2578 err = -ENOTCONN;
2581 if (err)
2582 break;
2584 if (chan->mode == L2CAP_MODE_ERTM)
2585 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2586 else
2587 l2cap_streaming_send(chan, &seg_queue);
2589 err = len;
2591 /* If the skbs were not queued for sending, they'll still be in
2592 * seg_queue and need to be purged.
2594 __skb_queue_purge(&seg_queue);
2595 break;
2597 default:
2598 BT_DBG("bad state %1.1x", chan->mode);
2599 err = -EBADFD;
2602 return err;
2604 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2606 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2608 struct l2cap_ctrl control;
2609 u16 seq;
2611 BT_DBG("chan %p, txseq %u", chan, txseq);
2613 memset(&control, 0, sizeof(control));
2614 control.sframe = 1;
2615 control.super = L2CAP_SUPER_SREJ;
2617 for (seq = chan->expected_tx_seq; seq != txseq;
2618 seq = __next_seq(chan, seq)) {
2619 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2620 control.reqseq = seq;
2621 l2cap_send_sframe(chan, &control);
2622 l2cap_seq_list_append(&chan->srej_list, seq);
2626 chan->expected_tx_seq = __next_seq(chan, txseq);
2629 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2631 struct l2cap_ctrl control;
2633 BT_DBG("chan %p", chan);
2635 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2636 return;
2638 memset(&control, 0, sizeof(control));
2639 control.sframe = 1;
2640 control.super = L2CAP_SUPER_SREJ;
2641 control.reqseq = chan->srej_list.tail;
2642 l2cap_send_sframe(chan, &control);
2645 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2647 struct l2cap_ctrl control;
2648 u16 initial_head;
2649 u16 seq;
2651 BT_DBG("chan %p, txseq %u", chan, txseq);
2653 memset(&control, 0, sizeof(control));
2654 control.sframe = 1;
2655 control.super = L2CAP_SUPER_SREJ;
2657 /* Capture initial list head to allow only one pass through the list. */
2658 initial_head = chan->srej_list.head;
2660 do {
2661 seq = l2cap_seq_list_pop(&chan->srej_list);
2662 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2663 break;
2665 control.reqseq = seq;
2666 l2cap_send_sframe(chan, &control);
2667 l2cap_seq_list_append(&chan->srej_list, seq);
2668 } while (chan->srej_list.head != initial_head);
2671 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2673 struct sk_buff *acked_skb;
2674 u16 ackseq;
2676 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2678 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2679 return;
2681 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2682 chan->expected_ack_seq, chan->unacked_frames);
2684 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2685 ackseq = __next_seq(chan, ackseq)) {
2687 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2688 if (acked_skb) {
2689 skb_unlink(acked_skb, &chan->tx_q);
2690 kfree_skb(acked_skb);
2691 chan->unacked_frames--;
2695 chan->expected_ack_seq = reqseq;
2697 if (chan->unacked_frames == 0)
2698 __clear_retrans_timer(chan);
2700 BT_DBG("unacked_frames %u", chan->unacked_frames);
2703 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2705 BT_DBG("chan %p", chan);
2707 chan->expected_tx_seq = chan->buffer_seq;
2708 l2cap_seq_list_clear(&chan->srej_list);
2709 skb_queue_purge(&chan->srej_q);
2710 chan->rx_state = L2CAP_RX_STATE_RECV;
2713 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2714 struct l2cap_ctrl *control,
2715 struct sk_buff_head *skbs, u8 event)
2717 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2718 event);
2720 switch (event) {
2721 case L2CAP_EV_DATA_REQUEST:
2722 if (chan->tx_send_head == NULL)
2723 chan->tx_send_head = skb_peek(skbs);
2725 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2726 l2cap_ertm_send(chan);
2727 break;
2728 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2729 BT_DBG("Enter LOCAL_BUSY");
2730 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2732 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2733 /* The SREJ_SENT state must be aborted if we are to
2734 * enter the LOCAL_BUSY state.
2736 l2cap_abort_rx_srej_sent(chan);
2739 l2cap_send_ack(chan);
2741 break;
2742 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2743 BT_DBG("Exit LOCAL_BUSY");
2744 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2746 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2747 struct l2cap_ctrl local_control;
2749 memset(&local_control, 0, sizeof(local_control));
2750 local_control.sframe = 1;
2751 local_control.super = L2CAP_SUPER_RR;
2752 local_control.poll = 1;
2753 local_control.reqseq = chan->buffer_seq;
2754 l2cap_send_sframe(chan, &local_control);
2756 chan->retry_count = 1;
2757 __set_monitor_timer(chan);
2758 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2760 break;
2761 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2762 l2cap_process_reqseq(chan, control->reqseq);
2763 break;
2764 case L2CAP_EV_EXPLICIT_POLL:
2765 l2cap_send_rr_or_rnr(chan, 1);
2766 chan->retry_count = 1;
2767 __set_monitor_timer(chan);
2768 __clear_ack_timer(chan);
2769 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2770 break;
2771 case L2CAP_EV_RETRANS_TO:
2772 l2cap_send_rr_or_rnr(chan, 1);
2773 chan->retry_count = 1;
2774 __set_monitor_timer(chan);
2775 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2776 break;
2777 case L2CAP_EV_RECV_FBIT:
2778 /* Nothing to process */
2779 break;
2780 default:
2781 break;
2785 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2786 struct l2cap_ctrl *control,
2787 struct sk_buff_head *skbs, u8 event)
2789 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2790 event);
2792 switch (event) {
2793 case L2CAP_EV_DATA_REQUEST:
2794 if (chan->tx_send_head == NULL)
2795 chan->tx_send_head = skb_peek(skbs);
2796 /* Queue data, but don't send. */
2797 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2798 break;
2799 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2800 BT_DBG("Enter LOCAL_BUSY");
2801 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2803 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2804 /* The SREJ_SENT state must be aborted if we are to
2805 * enter the LOCAL_BUSY state.
2807 l2cap_abort_rx_srej_sent(chan);
2810 l2cap_send_ack(chan);
2812 break;
2813 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2814 BT_DBG("Exit LOCAL_BUSY");
2815 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2817 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2818 struct l2cap_ctrl local_control;
2819 memset(&local_control, 0, sizeof(local_control));
2820 local_control.sframe = 1;
2821 local_control.super = L2CAP_SUPER_RR;
2822 local_control.poll = 1;
2823 local_control.reqseq = chan->buffer_seq;
2824 l2cap_send_sframe(chan, &local_control);
2826 chan->retry_count = 1;
2827 __set_monitor_timer(chan);
2828 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2830 break;
2831 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2832 l2cap_process_reqseq(chan, control->reqseq);
2834 /* Fall through */
2836 case L2CAP_EV_RECV_FBIT:
2837 if (control && control->final) {
2838 __clear_monitor_timer(chan);
2839 if (chan->unacked_frames > 0)
2840 __set_retrans_timer(chan);
2841 chan->retry_count = 0;
2842 chan->tx_state = L2CAP_TX_STATE_XMIT;
2843 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2845 break;
2846 case L2CAP_EV_EXPLICIT_POLL:
2847 /* Ignore */
2848 break;
2849 case L2CAP_EV_MONITOR_TO:
2850 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2851 l2cap_send_rr_or_rnr(chan, 1);
2852 __set_monitor_timer(chan);
2853 chan->retry_count++;
2854 } else {
2855 l2cap_send_disconn_req(chan, ECONNABORTED);
2857 break;
2858 default:
2859 break;
2863 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2864 struct sk_buff_head *skbs, u8 event)
2866 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2867 chan, control, skbs, event, chan->tx_state);
2869 switch (chan->tx_state) {
2870 case L2CAP_TX_STATE_XMIT:
2871 l2cap_tx_state_xmit(chan, control, skbs, event);
2872 break;
2873 case L2CAP_TX_STATE_WAIT_F:
2874 l2cap_tx_state_wait_f(chan, control, skbs, event);
2875 break;
2876 default:
2877 /* Ignore event */
2878 break;
2882 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2883 struct l2cap_ctrl *control)
2885 BT_DBG("chan %p, control %p", chan, control);
2886 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2889 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2890 struct l2cap_ctrl *control)
2892 BT_DBG("chan %p, control %p", chan, control);
2893 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2896 /* Copy frame to all raw sockets on that connection */
2897 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2899 struct sk_buff *nskb;
2900 struct l2cap_chan *chan;
2902 BT_DBG("conn %p", conn);
2904 mutex_lock(&conn->chan_lock);
2906 list_for_each_entry(chan, &conn->chan_l, list) {
2907 if (chan->chan_type != L2CAP_CHAN_RAW)
2908 continue;
2910 /* Don't send frame to the channel it came from */
2911 if (bt_cb(skb)->l2cap.chan == chan)
2912 continue;
2914 nskb = skb_clone(skb, GFP_KERNEL);
2915 if (!nskb)
2916 continue;
2917 if (chan->ops->recv(chan, nskb))
2918 kfree_skb(nskb);
2921 mutex_unlock(&conn->chan_lock);
2924 /* ---- L2CAP signalling commands ---- */
2925 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2926 u8 ident, u16 dlen, void *data)
2928 struct sk_buff *skb, **frag;
2929 struct l2cap_cmd_hdr *cmd;
2930 struct l2cap_hdr *lh;
2931 int len, count;
2933 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2934 conn, code, ident, dlen);
2936 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2937 return NULL;
2939 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2940 count = min_t(unsigned int, conn->mtu, len);
2942 skb = bt_skb_alloc(count, GFP_KERNEL);
2943 if (!skb)
2944 return NULL;
2946 lh = skb_put(skb, L2CAP_HDR_SIZE);
2947 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2949 if (conn->hcon->type == LE_LINK)
2950 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2951 else
2952 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2954 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2955 cmd->code = code;
2956 cmd->ident = ident;
2957 cmd->len = cpu_to_le16(dlen);
2959 if (dlen) {
2960 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2961 skb_put_data(skb, data, count);
2962 data += count;
2965 len -= skb->len;
2967 /* Continuation fragments (no L2CAP header) */
2968 frag = &skb_shinfo(skb)->frag_list;
2969 while (len) {
2970 count = min_t(unsigned int, conn->mtu, len);
2972 *frag = bt_skb_alloc(count, GFP_KERNEL);
2973 if (!*frag)
2974 goto fail;
2976 skb_put_data(*frag, data, count);
2978 len -= count;
2979 data += count;
2981 frag = &(*frag)->next;
2984 return skb;
2986 fail:
2987 kfree_skb(skb);
2988 return NULL;
2991 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2992 unsigned long *val)
2994 struct l2cap_conf_opt *opt = *ptr;
2995 int len;
2997 len = L2CAP_CONF_OPT_SIZE + opt->len;
2998 *ptr += len;
3000 *type = opt->type;
3001 *olen = opt->len;
3003 switch (opt->len) {
3004 case 1:
3005 *val = *((u8 *) opt->val);
3006 break;
3008 case 2:
3009 *val = get_unaligned_le16(opt->val);
3010 break;
3012 case 4:
3013 *val = get_unaligned_le32(opt->val);
3014 break;
3016 default:
3017 *val = (unsigned long) opt->val;
3018 break;
3021 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3022 return len;
3025 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3027 struct l2cap_conf_opt *opt = *ptr;
3029 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3031 if (size < L2CAP_CONF_OPT_SIZE + len)
3032 return;
3034 opt->type = type;
3035 opt->len = len;
3037 switch (len) {
3038 case 1:
3039 *((u8 *) opt->val) = val;
3040 break;
3042 case 2:
3043 put_unaligned_le16(val, opt->val);
3044 break;
3046 case 4:
3047 put_unaligned_le32(val, opt->val);
3048 break;
3050 default:
3051 memcpy(opt->val, (void *) val, len);
3052 break;
3055 *ptr += L2CAP_CONF_OPT_SIZE + len;
3058 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3060 struct l2cap_conf_efs efs;
3062 switch (chan->mode) {
3063 case L2CAP_MODE_ERTM:
3064 efs.id = chan->local_id;
3065 efs.stype = chan->local_stype;
3066 efs.msdu = cpu_to_le16(chan->local_msdu);
3067 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3068 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3069 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3070 break;
3072 case L2CAP_MODE_STREAMING:
3073 efs.id = 1;
3074 efs.stype = L2CAP_SERV_BESTEFFORT;
3075 efs.msdu = cpu_to_le16(chan->local_msdu);
3076 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3077 efs.acc_lat = 0;
3078 efs.flush_to = 0;
3079 break;
3081 default:
3082 return;
3085 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3086 (unsigned long) &efs, size);
3089 static void l2cap_ack_timeout(struct work_struct *work)
3091 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3092 ack_timer.work);
3093 u16 frames_to_ack;
3095 BT_DBG("chan %p", chan);
3097 l2cap_chan_lock(chan);
3099 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3100 chan->last_acked_seq);
3102 if (frames_to_ack)
3103 l2cap_send_rr_or_rnr(chan, 0);
3105 l2cap_chan_unlock(chan);
3106 l2cap_chan_put(chan);
3109 int l2cap_ertm_init(struct l2cap_chan *chan)
3111 int err;
3113 chan->next_tx_seq = 0;
3114 chan->expected_tx_seq = 0;
3115 chan->expected_ack_seq = 0;
3116 chan->unacked_frames = 0;
3117 chan->buffer_seq = 0;
3118 chan->frames_sent = 0;
3119 chan->last_acked_seq = 0;
3120 chan->sdu = NULL;
3121 chan->sdu_last_frag = NULL;
3122 chan->sdu_len = 0;
3124 skb_queue_head_init(&chan->tx_q);
3126 chan->local_amp_id = AMP_ID_BREDR;
3127 chan->move_id = AMP_ID_BREDR;
3128 chan->move_state = L2CAP_MOVE_STABLE;
3129 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3131 if (chan->mode != L2CAP_MODE_ERTM)
3132 return 0;
3134 chan->rx_state = L2CAP_RX_STATE_RECV;
3135 chan->tx_state = L2CAP_TX_STATE_XMIT;
3137 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3138 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3139 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3141 skb_queue_head_init(&chan->srej_q);
3143 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3144 if (err < 0)
3145 return err;
3147 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3148 if (err < 0)
3149 l2cap_seq_list_free(&chan->srej_list);
3151 return err;
3154 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3156 switch (mode) {
3157 case L2CAP_MODE_STREAMING:
3158 case L2CAP_MODE_ERTM:
3159 if (l2cap_mode_supported(mode, remote_feat_mask))
3160 return mode;
3161 /* fall through */
3162 default:
3163 return L2CAP_MODE_BASIC;
3167 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3169 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3170 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3173 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3175 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3176 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3179 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3180 struct l2cap_conf_rfc *rfc)
3182 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3183 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3185 /* Class 1 devices have must have ERTM timeouts
3186 * exceeding the Link Supervision Timeout. The
3187 * default Link Supervision Timeout for AMP
3188 * controllers is 10 seconds.
3190 * Class 1 devices use 0xffffffff for their
3191 * best-effort flush timeout, so the clamping logic
3192 * will result in a timeout that meets the above
3193 * requirement. ERTM timeouts are 16-bit values, so
3194 * the maximum timeout is 65.535 seconds.
3197 /* Convert timeout to milliseconds and round */
3198 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3200 /* This is the recommended formula for class 2 devices
3201 * that start ERTM timers when packets are sent to the
3202 * controller.
3204 ertm_to = 3 * ertm_to + 500;
3206 if (ertm_to > 0xffff)
3207 ertm_to = 0xffff;
3209 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3210 rfc->monitor_timeout = rfc->retrans_timeout;
3211 } else {
3212 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3213 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3217 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3219 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3220 __l2cap_ews_supported(chan->conn)) {
3221 /* use extended control field */
3222 set_bit(FLAG_EXT_CTRL, &chan->flags);
3223 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3224 } else {
3225 chan->tx_win = min_t(u16, chan->tx_win,
3226 L2CAP_DEFAULT_TX_WINDOW);
3227 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3229 chan->ack_win = chan->tx_win;
3232 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3234 struct hci_conn *conn = chan->conn->hcon;
3236 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3238 /* The 2-DH1 packet has between 2 and 56 information bytes
3239 * (including the 2-byte payload header)
3241 if (!(conn->pkt_type & HCI_2DH1))
3242 chan->imtu = 54;
3244 /* The 3-DH1 packet has between 2 and 85 information bytes
3245 * (including the 2-byte payload header)
3247 if (!(conn->pkt_type & HCI_3DH1))
3248 chan->imtu = 83;
3250 /* The 2-DH3 packet has between 2 and 369 information bytes
3251 * (including the 2-byte payload header)
3253 if (!(conn->pkt_type & HCI_2DH3))
3254 chan->imtu = 367;
3256 /* The 3-DH3 packet has between 2 and 554 information bytes
3257 * (including the 2-byte payload header)
3259 if (!(conn->pkt_type & HCI_3DH3))
3260 chan->imtu = 552;
3262 /* The 2-DH5 packet has between 2 and 681 information bytes
3263 * (including the 2-byte payload header)
3265 if (!(conn->pkt_type & HCI_2DH5))
3266 chan->imtu = 679;
3268 /* The 3-DH5 packet has between 2 and 1023 information bytes
3269 * (including the 2-byte payload header)
3271 if (!(conn->pkt_type & HCI_3DH5))
3272 chan->imtu = 1021;
3275 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3277 struct l2cap_conf_req *req = data;
3278 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3279 void *ptr = req->data;
3280 void *endptr = data + data_size;
3281 u16 size;
3283 BT_DBG("chan %p", chan);
3285 if (chan->num_conf_req || chan->num_conf_rsp)
3286 goto done;
3288 switch (chan->mode) {
3289 case L2CAP_MODE_STREAMING:
3290 case L2CAP_MODE_ERTM:
3291 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3292 break;
3294 if (__l2cap_efs_supported(chan->conn))
3295 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3297 /* fall through */
3298 default:
3299 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3300 break;
3303 done:
3304 if (chan->imtu != L2CAP_DEFAULT_MTU) {
3305 if (!chan->imtu)
3306 l2cap_mtu_auto(chan);
3307 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3308 endptr - ptr);
3311 switch (chan->mode) {
3312 case L2CAP_MODE_BASIC:
3313 if (disable_ertm)
3314 break;
3316 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3317 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3318 break;
3320 rfc.mode = L2CAP_MODE_BASIC;
3321 rfc.txwin_size = 0;
3322 rfc.max_transmit = 0;
3323 rfc.retrans_timeout = 0;
3324 rfc.monitor_timeout = 0;
3325 rfc.max_pdu_size = 0;
3327 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3328 (unsigned long) &rfc, endptr - ptr);
3329 break;
3331 case L2CAP_MODE_ERTM:
3332 rfc.mode = L2CAP_MODE_ERTM;
3333 rfc.max_transmit = chan->max_tx;
3335 __l2cap_set_ertm_timeouts(chan, &rfc);
3337 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3338 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3339 L2CAP_FCS_SIZE);
3340 rfc.max_pdu_size = cpu_to_le16(size);
3342 l2cap_txwin_setup(chan);
3344 rfc.txwin_size = min_t(u16, chan->tx_win,
3345 L2CAP_DEFAULT_TX_WINDOW);
3347 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3348 (unsigned long) &rfc, endptr - ptr);
3350 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3351 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3353 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3354 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3355 chan->tx_win, endptr - ptr);
3357 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3358 if (chan->fcs == L2CAP_FCS_NONE ||
3359 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3360 chan->fcs = L2CAP_FCS_NONE;
3361 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3362 chan->fcs, endptr - ptr);
3364 break;
3366 case L2CAP_MODE_STREAMING:
3367 l2cap_txwin_setup(chan);
3368 rfc.mode = L2CAP_MODE_STREAMING;
3369 rfc.txwin_size = 0;
3370 rfc.max_transmit = 0;
3371 rfc.retrans_timeout = 0;
3372 rfc.monitor_timeout = 0;
3374 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3375 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3376 L2CAP_FCS_SIZE);
3377 rfc.max_pdu_size = cpu_to_le16(size);
3379 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3380 (unsigned long) &rfc, endptr - ptr);
3382 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3383 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3385 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3386 if (chan->fcs == L2CAP_FCS_NONE ||
3387 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3388 chan->fcs = L2CAP_FCS_NONE;
3389 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3390 chan->fcs, endptr - ptr);
3392 break;
3395 req->dcid = cpu_to_le16(chan->dcid);
3396 req->flags = cpu_to_le16(0);
3398 return ptr - data;
3401 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3403 struct l2cap_conf_rsp *rsp = data;
3404 void *ptr = rsp->data;
3405 void *endptr = data + data_size;
3406 void *req = chan->conf_req;
3407 int len = chan->conf_len;
3408 int type, hint, olen;
3409 unsigned long val;
3410 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3411 struct l2cap_conf_efs efs;
3412 u8 remote_efs = 0;
3413 u16 mtu = L2CAP_DEFAULT_MTU;
3414 u16 result = L2CAP_CONF_SUCCESS;
3415 u16 size;
3417 BT_DBG("chan %p", chan);
3419 while (len >= L2CAP_CONF_OPT_SIZE) {
3420 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3421 if (len < 0)
3422 break;
3424 hint = type & L2CAP_CONF_HINT;
3425 type &= L2CAP_CONF_MASK;
3427 switch (type) {
3428 case L2CAP_CONF_MTU:
3429 if (olen != 2)
3430 break;
3431 mtu = val;
3432 break;
3434 case L2CAP_CONF_FLUSH_TO:
3435 if (olen != 2)
3436 break;
3437 chan->flush_to = val;
3438 break;
3440 case L2CAP_CONF_QOS:
3441 break;
3443 case L2CAP_CONF_RFC:
3444 if (olen != sizeof(rfc))
3445 break;
3446 memcpy(&rfc, (void *) val, olen);
3447 break;
3449 case L2CAP_CONF_FCS:
3450 if (olen != 1)
3451 break;
3452 if (val == L2CAP_FCS_NONE)
3453 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3454 break;
3456 case L2CAP_CONF_EFS:
3457 if (olen != sizeof(efs))
3458 break;
3459 remote_efs = 1;
3460 memcpy(&efs, (void *) val, olen);
3461 break;
3463 case L2CAP_CONF_EWS:
3464 if (olen != 2)
3465 break;
3466 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3467 return -ECONNREFUSED;
3468 set_bit(FLAG_EXT_CTRL, &chan->flags);
3469 set_bit(CONF_EWS_RECV, &chan->conf_state);
3470 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3471 chan->remote_tx_win = val;
3472 break;
3474 default:
3475 if (hint)
3476 break;
3477 result = L2CAP_CONF_UNKNOWN;
3478 *((u8 *) ptr++) = type;
3479 break;
3483 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3484 goto done;
3486 switch (chan->mode) {
3487 case L2CAP_MODE_STREAMING:
3488 case L2CAP_MODE_ERTM:
3489 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3490 chan->mode = l2cap_select_mode(rfc.mode,
3491 chan->conn->feat_mask);
3492 break;
3495 if (remote_efs) {
3496 if (__l2cap_efs_supported(chan->conn))
3497 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3498 else
3499 return -ECONNREFUSED;
3502 if (chan->mode != rfc.mode)
3503 return -ECONNREFUSED;
3505 break;
3508 done:
3509 if (chan->mode != rfc.mode) {
3510 result = L2CAP_CONF_UNACCEPT;
3511 rfc.mode = chan->mode;
3513 if (chan->num_conf_rsp == 1)
3514 return -ECONNREFUSED;
3516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3517 (unsigned long) &rfc, endptr - ptr);
3520 if (result == L2CAP_CONF_SUCCESS) {
3521 /* Configure output options and let the other side know
3522 * which ones we don't like. */
3524 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3525 result = L2CAP_CONF_UNACCEPT;
3526 else {
3527 chan->omtu = mtu;
3528 set_bit(CONF_MTU_DONE, &chan->conf_state);
3530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3532 if (remote_efs) {
3533 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3534 efs.stype != L2CAP_SERV_NOTRAFIC &&
3535 efs.stype != chan->local_stype) {
3537 result = L2CAP_CONF_UNACCEPT;
3539 if (chan->num_conf_req >= 1)
3540 return -ECONNREFUSED;
3542 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3543 sizeof(efs),
3544 (unsigned long) &efs, endptr - ptr);
3545 } else {
3546 /* Send PENDING Conf Rsp */
3547 result = L2CAP_CONF_PENDING;
3548 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3552 switch (rfc.mode) {
3553 case L2CAP_MODE_BASIC:
3554 chan->fcs = L2CAP_FCS_NONE;
3555 set_bit(CONF_MODE_DONE, &chan->conf_state);
3556 break;
3558 case L2CAP_MODE_ERTM:
3559 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3560 chan->remote_tx_win = rfc.txwin_size;
3561 else
3562 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3564 chan->remote_max_tx = rfc.max_transmit;
3566 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3567 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3568 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3569 rfc.max_pdu_size = cpu_to_le16(size);
3570 chan->remote_mps = size;
3572 __l2cap_set_ertm_timeouts(chan, &rfc);
3574 set_bit(CONF_MODE_DONE, &chan->conf_state);
3576 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3577 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3579 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3580 chan->remote_id = efs.id;
3581 chan->remote_stype = efs.stype;
3582 chan->remote_msdu = le16_to_cpu(efs.msdu);
3583 chan->remote_flush_to =
3584 le32_to_cpu(efs.flush_to);
3585 chan->remote_acc_lat =
3586 le32_to_cpu(efs.acc_lat);
3587 chan->remote_sdu_itime =
3588 le32_to_cpu(efs.sdu_itime);
3589 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3590 sizeof(efs),
3591 (unsigned long) &efs, endptr - ptr);
3593 break;
3595 case L2CAP_MODE_STREAMING:
3596 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3597 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3598 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3599 rfc.max_pdu_size = cpu_to_le16(size);
3600 chan->remote_mps = size;
3602 set_bit(CONF_MODE_DONE, &chan->conf_state);
3604 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3605 (unsigned long) &rfc, endptr - ptr);
3607 break;
3609 default:
3610 result = L2CAP_CONF_UNACCEPT;
3612 memset(&rfc, 0, sizeof(rfc));
3613 rfc.mode = chan->mode;
3616 if (result == L2CAP_CONF_SUCCESS)
3617 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3619 rsp->scid = cpu_to_le16(chan->dcid);
3620 rsp->result = cpu_to_le16(result);
3621 rsp->flags = cpu_to_le16(0);
3623 return ptr - data;
3626 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3627 void *data, size_t size, u16 *result)
3629 struct l2cap_conf_req *req = data;
3630 void *ptr = req->data;
3631 void *endptr = data + size;
3632 int type, olen;
3633 unsigned long val;
3634 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3635 struct l2cap_conf_efs efs;
3637 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3639 while (len >= L2CAP_CONF_OPT_SIZE) {
3640 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3641 if (len < 0)
3642 break;
3644 switch (type) {
3645 case L2CAP_CONF_MTU:
3646 if (olen != 2)
3647 break;
3648 if (val < L2CAP_DEFAULT_MIN_MTU) {
3649 *result = L2CAP_CONF_UNACCEPT;
3650 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3651 } else
3652 chan->imtu = val;
3653 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3654 endptr - ptr);
3655 break;
3657 case L2CAP_CONF_FLUSH_TO:
3658 if (olen != 2)
3659 break;
3660 chan->flush_to = val;
3661 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3662 chan->flush_to, endptr - ptr);
3663 break;
3665 case L2CAP_CONF_RFC:
3666 if (olen != sizeof(rfc))
3667 break;
3668 memcpy(&rfc, (void *)val, olen);
3669 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3670 rfc.mode != chan->mode)
3671 return -ECONNREFUSED;
3672 chan->fcs = 0;
3673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3674 (unsigned long) &rfc, endptr - ptr);
3675 break;
3677 case L2CAP_CONF_EWS:
3678 if (olen != 2)
3679 break;
3680 chan->ack_win = min_t(u16, val, chan->ack_win);
3681 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3682 chan->tx_win, endptr - ptr);
3683 break;
3685 case L2CAP_CONF_EFS:
3686 if (olen != sizeof(efs))
3687 break;
3688 memcpy(&efs, (void *)val, olen);
3689 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3690 efs.stype != L2CAP_SERV_NOTRAFIC &&
3691 efs.stype != chan->local_stype)
3692 return -ECONNREFUSED;
3693 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3694 (unsigned long) &efs, endptr - ptr);
3695 break;
3697 case L2CAP_CONF_FCS:
3698 if (olen != 1)
3699 break;
3700 if (*result == L2CAP_CONF_PENDING)
3701 if (val == L2CAP_FCS_NONE)
3702 set_bit(CONF_RECV_NO_FCS,
3703 &chan->conf_state);
3704 break;
3708 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3709 return -ECONNREFUSED;
3711 chan->mode = rfc.mode;
3713 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3714 switch (rfc.mode) {
3715 case L2CAP_MODE_ERTM:
3716 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3717 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3718 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3719 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3720 chan->ack_win = min_t(u16, chan->ack_win,
3721 rfc.txwin_size);
3723 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3724 chan->local_msdu = le16_to_cpu(efs.msdu);
3725 chan->local_sdu_itime =
3726 le32_to_cpu(efs.sdu_itime);
3727 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3728 chan->local_flush_to =
3729 le32_to_cpu(efs.flush_to);
3731 break;
3733 case L2CAP_MODE_STREAMING:
3734 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3738 req->dcid = cpu_to_le16(chan->dcid);
3739 req->flags = cpu_to_le16(0);
3741 return ptr - data;
3744 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3745 u16 result, u16 flags)
3747 struct l2cap_conf_rsp *rsp = data;
3748 void *ptr = rsp->data;
3750 BT_DBG("chan %p", chan);
3752 rsp->scid = cpu_to_le16(chan->dcid);
3753 rsp->result = cpu_to_le16(result);
3754 rsp->flags = cpu_to_le16(flags);
3756 return ptr - data;
3759 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3761 struct l2cap_le_conn_rsp rsp;
3762 struct l2cap_conn *conn = chan->conn;
3764 BT_DBG("chan %p", chan);
3766 rsp.dcid = cpu_to_le16(chan->scid);
3767 rsp.mtu = cpu_to_le16(chan->imtu);
3768 rsp.mps = cpu_to_le16(chan->mps);
3769 rsp.credits = cpu_to_le16(chan->rx_credits);
3770 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3772 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3773 &rsp);
3776 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3778 struct l2cap_conn_rsp rsp;
3779 struct l2cap_conn *conn = chan->conn;
3780 u8 buf[128];
3781 u8 rsp_code;
3783 rsp.scid = cpu_to_le16(chan->dcid);
3784 rsp.dcid = cpu_to_le16(chan->scid);
3785 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3786 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3788 if (chan->hs_hcon)
3789 rsp_code = L2CAP_CREATE_CHAN_RSP;
3790 else
3791 rsp_code = L2CAP_CONN_RSP;
3793 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3795 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3797 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3798 return;
3800 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3801 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3802 chan->num_conf_req++;
/* Extract the RFC (and optional extended-window) option from a received
 * Configure Response and apply the negotiated ERTM/streaming parameters
 * to the channel.  Defaults are used when the remote omitted the options.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only ERTM and streaming mode carry an RFC worth parsing */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		/* Truncated option: stop rather than read past the buffer */
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Ignore options with an unexpected length */
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* Extended control frames take the window from the EWS
		 * option; otherwise use the RFC's one-byte txwin_size.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3861 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3862 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3863 u8 *data)
3865 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3867 if (cmd_len < sizeof(*rej))
3868 return -EPROTO;
3870 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3871 return 0;
3873 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3874 cmd->ident == conn->info_ident) {
3875 cancel_delayed_work(&conn->info_timer);
3877 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3878 conn->info_ident = 0;
3880 l2cap_conn_start(conn);
3883 return 0;
/* Handle an incoming Connection Request (or AMP Create Channel Request,
 * selected via @rsp_code/@amp_id).  Validates the PSM, link security and
 * requested source CID, creates a new channel from the listening parent,
 * and always sends a response — success, pending, or a failure code.
 *
 * Returns the newly created channel, or NULL when the request was
 * rejected before a channel was allocated.
 *
 * Locking: takes conn->chan_lock and the parent channel lock for the
 * channel-creation section; both are released before the response is
 * sent (the response only uses stack-local state).
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must authorize before we answer */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security upgrade in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature-mask exchange that is still pending */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		/* Connection accepted immediately: begin configuration */
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4029 static int l2cap_connect_req(struct l2cap_conn *conn,
4030 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4032 struct hci_dev *hdev = conn->hcon->hdev;
4033 struct hci_conn *hcon = conn->hcon;
4035 if (cmd_len < sizeof(struct l2cap_conn_req))
4036 return -EPROTO;
4038 hci_dev_lock(hdev);
4039 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4040 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4041 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4042 hci_dev_unlock(hdev);
4044 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4045 return 0;
/* Handle a Connection Response (or AMP Create Channel Response).  Looks
 * up the local channel by the responder-supplied scid, or — while the
 * response is still "pending" and no scid was echoed — by the request
 * ident, then advances the channel state machine:
 *   success -> BT_CONFIG and send our Configure Request,
 *   pending -> remember CONF_CONNECT_PEND,
 *   anything else -> tear the channel down with ECONNREFUSED.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No scid yet: match the response to our request ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the Configure Request once per channel */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4121 static inline void set_default_fcs(struct l2cap_chan *chan)
4123 /* FCS is enabled only in ERTM or streaming mode, if one or both
4124 * sides request it.
4126 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4127 chan->fcs = L2CAP_FCS_NONE;
4128 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4129 chan->fcs = L2CAP_FCS_CRC16;
4132 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4133 u8 ident, u16 flags)
4135 struct l2cap_conn *conn = chan->conn;
4137 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4138 flags);
4140 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4141 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4143 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4144 l2cap_build_conf_rsp(chan, data,
4145 L2CAP_CONF_SUCCESS, flags), data);
4148 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4149 u16 scid, u16 dcid)
4151 struct l2cap_cmd_rej_cid rej;
4153 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4154 rej.scid = __cpu_to_le16(scid);
4155 rej.dcid = __cpu_to_le16(dcid);
4157 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request.  Option data may arrive split
 * across several requests (continuation flag); fragments are accumulated
 * in chan->conf_req until the final one arrives, then parsed as a whole
 * and answered.  Once both directions of configuration are done the
 * channel is initialized (ERTM state if needed) and marked ready.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked; unlocked at "unlock" below */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in BT_CONFIG/BT_CONNECT2 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize the channel */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Configure Response for one of our Configure
 * Requests.  SUCCESS applies the negotiated RFC options; PENDING (EFS)
 * may re-parse and answer or trigger AMP logical-link creation;
 * UNACCEPT re-negotiates with an adjusted request up to
 * L2CAP_CONF_MAX_CONF_RSP times; anything else disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked; unlocked at "done" below */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP channel: respond only after the
				 * logical link comes up.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize the channel */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the channel down.
 *
 * The hold/del/unlock/close/put sequence is deliberate: the extra
 * reference keeps the channel alive across l2cap_chan_del() so that
 * ops->close() can still be called after dropping the channel lock.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle an incoming Disconnection Response to a request we sent.
 * Only channels already in BT_DISCONN are torn down; responses for
 * channels in any other state are silently ignored.
 *
 * Uses the same hold/del/unlock/close/put ordering as the request
 * handler so ops->close() runs without the channel lock held.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle an incoming Information Request.  Answers feature-mask and
 * fixed-channel queries with this host's capabilities; any other
 * information type gets an L2CAP_IR_NOTSUPP response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Header plus a 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Header plus the 8-byte fixed channels bitmap; only the
		 * first octet carries local flags, the rest are zero.
		 */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
/* Handle an incoming Information Response.  On a feature-mask answer,
 * either chain a fixed-channel query (if the peer supports fixed
 * channels) or finish the exchange; on a fixed-channel answer, record
 * the peer's bitmap.  Finishing the exchange releases channels that
 * were waiting on it via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channels query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
/* Handle an AMP Create Channel Request.  Controller id 0 (BR/EDR) is
 * treated as an ordinary connect; otherwise the AMP controller id is
 * validated and, on success, the new BR/EDR shadow channel is bound to
 * the AMP physical link.  Invalid controller ids are answered with
 * L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			/* No AMP link to the peer: reject the new channel */
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4665 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4667 struct l2cap_move_chan_req req;
4668 u8 ident;
4670 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4672 ident = l2cap_get_ident(chan->conn);
4673 chan->ident = ident;
4675 req.icid = cpu_to_le16(chan->scid);
4676 req.dest_amp_id = dest_amp_id;
4678 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4679 &req);
4681 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4684 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4686 struct l2cap_move_chan_rsp rsp;
4688 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4690 rsp.icid = cpu_to_le16(chan->dcid);
4691 rsp.result = cpu_to_le16(result);
4693 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4694 sizeof(rsp), &rsp);
4697 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4699 struct l2cap_move_chan_cfm cfm;
4701 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4703 chan->ident = l2cap_get_ident(chan->conn);
4705 cfm.icid = cpu_to_le16(chan->scid);
4706 cfm.result = cpu_to_le16(result);
4708 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4709 sizeof(cfm), &cfm);
4711 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4714 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4716 struct l2cap_move_chan_cfm cfm;
4718 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4720 cfm.icid = cpu_to_le16(icid);
4721 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4723 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4724 sizeof(cfm), &cfm);
4727 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4728 u16 icid)
4730 struct l2cap_move_chan_cfm_rsp rsp;
4732 BT_DBG("icid 0x%4.4x", icid);
4734 rsp.icid = cpu_to_le16(icid);
4735 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4738 static void __release_logical_link(struct l2cap_chan *chan)
4740 chan->hs_hchan = NULL;
4741 chan->hs_hcon = NULL;
4743 /* Placeholder - release the logical link */
/* React to a failed AMP logical-link setup.  A channel that never
 * reached BT_CONNECTED is simply disconnected; an established channel
 * that was mid-move has its move aborted according to its role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator this controller is not supported */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
/* Complete channel creation once the AMP logical link is up: attach the
 * hci_chan, send the pending EFS Configure Response, and — if the
 * remote's configuration already finished — initialize ERTM and mark
 * the channel ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* rsp is just scratch space for building the response */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
/* Advance a channel move once its AMP logical link is up.  The next
 * step depends on the channel's move_state and role: either wait for
 * the peer's success response, park until local busy clears, or send
 * the Move Confirm / Move Response now.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
/* Call with chan locked */
/* Confirmation callback for AMP logical-link setup.  On failure the
 * link is released and the channel/move cleaned up; on success the
 * result is routed to either channel creation (channel not yet
 * connected) or channel move completion (channel established).
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4855 void l2cap_move_start(struct l2cap_chan *chan)
4857 BT_DBG("chan %p", chan);
4859 if (chan->local_amp_id == AMP_ID_BREDR) {
4860 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4861 return;
4862 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4863 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4864 /* Placeholder - start physical link setup */
4865 } else {
4866 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4867 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4868 chan->move_id = 0;
4869 l2cap_move_setup(chan);
4870 l2cap_send_move_chan_req(chan, 0);
/* Continue AMP channel creation after the physical link attempt.
 * Outgoing channels either proceed with a Create Channel Request or
 * fall back to a plain BR/EDR connect; incoming channels are answered
 * with a Create Channel Response and, on success, move on to
 * configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP links do not use an L2CAP-level FCS */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: start configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
4926 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4927 u8 remote_amp_id)
4929 l2cap_move_setup(chan);
4930 chan->move_id = local_amp_id;
4931 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4933 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side of a channel move after the physical link attempt.
 * NOTE(review): hchan is currently always NULL (the lookup is a
 * placeholder), so in practice this always answers NOT_ALLOWED; the
 * other branches describe the intended behavior once a logical link
 * can be obtained.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4961 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4963 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4964 u8 rsp_result;
4965 if (result == -EINVAL)
4966 rsp_result = L2CAP_MR_BAD_ID;
4967 else
4968 rsp_result = L2CAP_MR_NOT_ALLOWED;
4970 l2cap_send_move_chan_rsp(chan, rsp_result);
4973 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4974 chan->move_state = L2CAP_MOVE_STABLE;
4976 /* Restart data transmission */
4977 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Confirmation callback for AMP physical-link setup.  Depending on the
 * channel state and move role, the result drives channel creation, move
 * initiation/response, or cancellation of the move.  Channels already
 * closing are left alone.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
/* Handle an incoming L2CAP Move Channel Request (AMP).  Validates the
 * request, detects move collisions, and answers with a Move Channel
 * Response carrying the appropriate result code.
 *
 * NOTE(review): l2cap_get_chan_by_dcid() is expected to return the
 * channel locked — the l2cap_chan_unlock() before returning relies on
 * that; confirm against the helper's definition.
 *
 * Returns 0 on handled (even on protocol-level refusals), -EPROTO on a
 * malformed command, -EINVAL when A2MP is not supported locally (the
 * caller then emits a command reject).
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel - reject by icid directly since there is
		 * no chan to route the response through.
		 */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels not pinned to BR/EDR may
	 * be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist, be an AMP (not BR/EDR)
	 * and be powered up.
	 */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Advance the initiator-side move state machine after a Move Channel
 * Response with result SUCCESS or PEND.  If the channel cannot be
 * found by scid, a confirm is sent keyed on the icid alone, as the
 * peer still expects one.
 *
 * NOTE(review): l2cap_get_chan_by_scid() is expected to return the
 * channel locked — see the l2cap_chan_unlock() at the end.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer asked for more time - re-arm with the extended
		 * response timeout.
		 */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
/* Handle a failed Move Channel Response.  On a collision the initiator
 * yields and becomes the responder; on any other failure the move is
 * rolled back.  In every case an UNCONFIRMED confirm is sent so the
 * peer's state machine can terminate.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5228 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5229 struct l2cap_cmd_hdr *cmd,
5230 u16 cmd_len, void *data)
5232 struct l2cap_move_chan_rsp *rsp = data;
5233 u16 icid, result;
5235 if (cmd_len != sizeof(*rsp))
5236 return -EPROTO;
5238 icid = le16_to_cpu(rsp->icid);
5239 result = le16_to_cpu(rsp->result);
5241 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5243 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5244 l2cap_move_continue(conn, icid, result);
5245 else
5246 l2cap_move_fail(conn, cmd->ident, icid, result);
5248 return 0;
/* Handle an incoming L2CAP Move Channel Confirm.  A CONFIRMED result
 * commits the move to the new controller; anything else rolls it back.
 * The spec requires a Confirm Response even when the icid is unknown.
 *
 * Returns 0 when handled, -EPROTO on a malformed command.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Back on BR/EDR - the AMP logical link is no
			 * longer needed.
			 */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle an incoming L2CAP Move Channel Confirm Response - the final
 * handshake step of a channel move.  Commits the new controller id and
 * finishes the move if we were waiting on this response.
 *
 * Returns 0 when handled (including unknown icid), -EPROTO on a
 * malformed command.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: drop the now-unused AMP logical
		 * link, if any.
		 */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle an LE Connection Parameter Update Request on the LE signaling
 * channel.  Only valid when we are the connection master.  The request
 * is validated with hci_check_conn_params(); a response is always sent,
 * and on acceptance the controller is asked to update the connection
 * and the new parameters are reported to the management interface.
 *
 * Returns 0 when handled, -EPROTO on a malformed command, -EINVAL when
 * received in the slave role (caller then emits a command reject).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
/* Handle an LE Credit Based Connection Response.  Matches the pending
 * channel by command ident.  On success the remote parameters (dcid,
 * mtu, mps, initial credits) are recorded and the channel made ready.
 * An authentication/encryption failure triggers a security upgrade via
 * SMP and a retry; any other result tears the channel down.
 *
 * Lock order: conn->chan_lock is taken before the individual channel
 * lock, matching the rest of this file.
 *
 * Returns 0 on success, -EPROTO on malformed/inconsistent parameters,
 * -EBADSLT when no channel matches the ident or the dcid is already in
 * use.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* On success the spec minimums (MTU/MPS >= 23) and the LE
	 * dynamic CID range must hold.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					      dcid < L2CAP_CID_DYN_START ||
					      dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Only handlers whose failure should trigger a Command Reject
 * propagate their error (note several _rsp handlers deliberately
 * discard it).  An unknown opcode returns -EINVAL so the caller
 * rejects it as "not understood".
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
/* Handle an LE Credit Based Connection Request.  Looks up a listening
 * channel for the PSM, enforces security and CID validity, creates the
 * new channel and replies with an LE Connect Response - unless setup is
 * deferred (FLAG_DEFER_SETUP), in which case the internal-only
 * L2CAP_CR_PEND result suppresses the response (the socket layer will
 * answer later).
 *
 * Returns 0 when handled (all protocol-level refusals included),
 * -EPROTO on a malformed command or out-of-spec MTU/MPS (< 23).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
/* Handle an LE Flow Control Credit packet.  Adds the peer's credits to
 * the channel's tx budget and resumes transmission.  A credit total
 * that would exceed LE_FLOWCTL_MAX_CREDITS (65535) is a protocol
 * violation and disconnects the channel.
 *
 * Returns 0 when handled (including the overflow case, to avoid an
 * unnecessary command reject), -EPROTO on a malformed command,
 * -EBADSLT for an unknown cid.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle a Command Reject on the LE signaling channel.  A reject for a
 * pending request ident means the peer refused our request, so the
 * matching channel (if any) is torn down.  The reject payload itself
 * is only length-checked; its reason code is not inspected here.
 *
 * Returns 0 when handled, -EPROTO on a truncated command.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}
/* Dispatch one LE signaling command to its handler.  An unknown opcode
 * returns -EINVAL so the caller answers with Command Reject (not
 * understood).  Handlers whose errors are intentionally ignored have
 * their return value discarded.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
/* Process one skb received on the LE signaling channel.  Exactly one
 * command per PDU is allowed on LE (unlike BR/EDR, which batches).
 * A dispatch error results in a Command Reject; the skb is always
 * consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The declared length must exactly cover the remaining payload
	 * and ident 0 is reserved.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
/* Process one skb received on the BR/EDR signaling channel.  Multiple
 * commands may be concatenated in a single PDU; each is copied into a
 * local header and dispatched in turn.  Dispatch errors produce a
 * Command Reject for that command only.  The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Deliver a copy to raw sockets before parsing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A declared length past the buffer, or ident 0, ends
		 * parsing of the whole PDU.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * The received FCS is read from just past the trimmed payload - valid
 * because skb_trim() only adjusts the length, the bytes are still
 * there - and compared against a CRC computed over header + payload
 * (the header sits immediately before skb->data after earlier pulls).
 *
 * Returns 0 if the FCS matches or the channel does not use FCS,
 * -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
/* Answer a poll (P-bit) from the peer: send RNR if we are locally
 * busy, otherwise flush pending I-frames and, if none of them carried
 * the F-bit, follow up with an RR so the peer always sees the final
 * response it is waiting for.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
/* Append new_frag to skb's fragment list, keeping *last_frag pointing
 * at the tail for O(1) appends, and update skb's length accounting.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
/* Reassemble an SDU from ERTM I-frames according to the frame's SAR
 * bits.  Ownership: an skb consumed into the reassembly buffer (or
 * delivered via ops->recv) is set to NULL locally; on any error the
 * current skb and any partial SDU are freed and reassembly state is
 * reset.
 *
 * Returns 0 on success (including "stored, SDU not complete yet"),
 * -EINVAL on SAR sequence violations, -EMSGSIZE when the announced SDU
 * length exceeds our MTU, or the error from ops->recv.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start frame must not already contain the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Continuation may not complete or overrun the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end frame must land exactly on the announced length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
/* Placeholder for re-segmenting queued data after a channel move
 * changes the MPS.  Currently a no-op that always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6044 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6046 u8 event;
6048 if (chan->mode != L2CAP_MODE_ERTM)
6049 return;
6051 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6052 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue after missing frames arrive: deliver frames to
 * reassembly in sequence order until a gap (or local busy) stops us.
 * Once the queue is empty the rx state machine returns to normal RECV
 * and an acknowledgment is sent.
 *
 * Returns 0, or the first error from l2cap_reassemble_sdu().
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame.  Protocol violations (reqseq equal to next_tx_seq, retry
 * limit exceeded) disconnect the channel.  The P/F-bit bookkeeping
 * guards against retransmitting twice when an SREJ and its F-bit
 * answer cross on the wire (CONN_SREJ_ACT / srej_save_reqseq).
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit SREJ answers
			 * the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
/* Handle a received REJ S-frame: retransmit all unacknowledged
 * I-frames starting at reqseq.  Protocol violations (reqseq equal to
 * next_tx_seq, retry limit exceeded on the first rejected frame)
 * disconnect the channel.  CONN_REJ_ACT prevents a duplicate
 * retransmit when the F-bit answer to our poll carries the same REJ.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Classify the tx sequence number of a received I-frame relative to
 * the receive window: expected, duplicate, unexpected (gap), one of
 * the SREJ-related cases while recovery is in progress, or invalid.
 * All window arithmetic is modular via __seq_offset() against
 * last_acked_seq.  The return value drives the rx state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
/* ERTM receive state machine: handler for L2CAP_RX_STATE_RECV (the
 * normal receive state).
 *
 * Processes one receive event.  I-frames are classified by TxSeq:
 * expected frames are reassembled into SDUs, unexpected frames start
 * SREJ-based recovery, duplicates only feed their ack info to the tx
 * side, and invalid frames are either ignored or tear the channel down.
 * S-frame events (RR/RNR/REJ/SREJ) drive the transmit side.
 *
 * Returns 0 or a negative error from SDU reassembly.  @skb is freed
 * here unless ownership was taken (tracked via skb_in_use).
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
	int err = 0;
	bool skb_in_use = false;	/* true once @skb is queued/consumed */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* The piggybacked ReqSeq still acks our tx frames */
			l2cap_pass_to_tx(chan, control);

			/* Can't accept payload while locally busy; the
			 * remote will retransmit once busy is cleared.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* F-bit answers an earlier poll: retransmit
			 * outstanding frames unless a REJ is already
			 * being acted upon.
			 */
			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Restart SREJ recovery from a clean list */
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			/* Unrecoverable sequence error: drop the link */
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* No retransmit while a REJ is pending or a
			 * channel move is in progress.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left busy: restart the retransmit
			 * timer if frames are still outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			/* Poll must be answered with the F-bit set */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);

		/* Remote is busy: stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;

	/* Free the frame unless some path above took ownership of it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);

	return err;
/* ERTM receive state machine: handler for L2CAP_RX_STATE_SREJ_SENT.
 *
 * Entered after a sequence gap was detected and SREJ frames were sent.
 * Incoming I-frames are buffered on srej_q until the missing frames
 * arrive; EXPECTED_SREJ frames pop entries off srej_list and may allow
 * the queued frames to be reassembled in order.
 *
 * Returns 0 or a negative error from reassembling queued I-frames.
 * Frees @skb unless ownership was taken (tracked via skb_in_use).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;	/* true once @skb is queued/consumed */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* A frame we SREJ'd arrived: retire its srej_list
			 * entry, queue it, and try to drain the queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			/* Unrecoverable sequence error: drop the link */
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);

			/* Answer the poll (F-bit) and re-request the
			 * outstanding SREJ'd frame.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Plain RNR: acknowledge with a locally built RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;

	/* Free the frame unless some path above took ownership of it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);

	return err;
6547 static int l2cap_finish_move(struct l2cap_chan *chan)
6549 BT_DBG("chan %p", chan);
6551 chan->rx_state = L2CAP_RX_STATE_RECV;
6553 if (chan->hs_hcon)
6554 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6555 else
6556 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6558 return l2cap_resegment(chan);
/* ERTM receive state machine: handler for L2CAP_RX_STATE_WAIT_P.
 *
 * After a channel move we wait for the remote to poll us (P=1).  Any
 * non-poll event is a protocol error.  On the poll: process the acked
 * frames, rewind the transmit pointer and NextTxSeq to the receiver's
 * ReqSeq, finish the move (MTU/resegmentation), then answer the poll
 * with the F-bit set.  I-frame events cannot be handled here and are
 * reported as -EPROTO; other S-frame events fall through to the normal
 * RECV handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM receive state machine: handler for L2CAP_RX_STATE_WAIT_F.
 *
 * After a channel move we polled the remote and wait for its final
 * response (F=1); anything else is a protocol error.  On the response:
 * clear remote-busy, return to the RECV state, rewind the transmit
 * state to the receiver's ReqSeq, pick the MTU of the new link and
 * re-segment pending data, then let the RECV handler process the frame.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* MTU of the link the channel moved to */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
6637 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6639 /* Make sure reqseq is for a packet that has been sent but not acked */
6640 u16 unacked;
6642 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6643 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher.
 *
 * Validates the frame's ReqSeq (it must ack only frames we have sent
 * but not yet had acked) and hands the event to the handler for the
 * channel's current rx state.  A ReqSeq outside the valid window is a
 * protocol violation and disconnects the channel.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);

	return err;
/* Streaming-mode receive path.
 *
 * Streaming mode has no retransmission: only a frame with the expected
 * TxSeq is reassembled.  Any other classification discards the partial
 * SDU being assembled and drops the frame.  In all cases the sequence
 * state is resynchronized to the received TxSeq.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: abandon the partial SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;

		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);

	/* Resynchronize to the frame just seen */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
/* Receive entry point for ERTM and streaming channels.
 *
 * Unpacks the (extended) control field, verifies the FCS and payload
 * length against the negotiated MPS, applies the channel's socket
 * filter, then routes I-frames into the ERTM/streaming rx machinery and
 * S-frames into the ERTM event handlers.  Always returns 0; the skb is
 * consumed on every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/* We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length field and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;

	if ((chan->mode == L2CAP_MODE_ERTM ||
	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
		goto drop;

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame's super field onto an rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
/* Return LE flow-control credits to the remote sender.
 *
 * The target credit level is enough for one full imtu worth of
 * MPS-sized fragments plus one; only the shortfall from that target is
 * sent, and nothing is sent when the remote already holds enough.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;

	if (!return_credits)
		return;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6834 static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6836 int err;
6838 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6840 /* Wait recv to confirm reception before updating the credits */
6841 err = chan->ops->recv(chan, skb);
6843 /* Update credits whenever an SDU is received */
6844 l2cap_chan_le_send_credits(chan);
6846 return err;
/* Receive one LE credit-based flow-control PDU.
 *
 * Accounts a credit for the PDU (disconnecting if the remote sent with
 * no credits left), then either starts a new SDU (first fragment
 * carries the SDU length) or appends to the SDU in progress, delivering
 * it via l2cap_le_recv() once complete.  On any reassembly error both
 * @skb and the partial SDU are freed here, so the function still
 * returns 0 to keep the caller from double-freeing; a negative return
 * only happens before @skb ownership is taken.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;

	/* Each received PDU consumes one of the credits we granted */
	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First fragment: starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;

		/* Whole SDU in one PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_le_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);

		return 0;

	/* Continuation fragment of an SDU in progress */
	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;	/* ownership moved into chan->sdu */

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_le_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
/* Route a frame on a connection-oriented CID to its channel.
 *
 * Looks up (and locks) the channel by source CID; an unknown CID is
 * only acceptable for the A2MP CID, where a channel may be created on
 * demand.  The frame is then dispatched according to the channel mode
 * (LE flow control, basic, ERTM/streaming).  Consumes @skb on every
 * path and unlocks the channel before returning.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procdure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb on all paths */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to a listening channel.
 *
 * Only valid on ACL links.  Finds a global channel bound to @psm,
 * checks its state and MTU, records the remote address/PSM in the skb
 * control block for msg_name, and passes the skb to the channel owner.
 * Frees @skb on every failure path and drops the channel reference
 * taken by the lookup.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
/* Demultiplex one complete L2CAP frame onto the proper channel.
 *
 * Frames arriving before the HCI connection is fully up are queued on
 * pending_rx and replayed later.  After validating the basic header
 * length and filtering data from blacklisted LE peers, the frame is
 * routed by CID: signaling, connectionless, LE signaling, or a data
 * channel.  Ownership of @skb passes to the called handler (or it is
 * freed here on validation failure).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM before the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
7119 static void process_pending_rx(struct work_struct *work)
7121 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7122 pending_rx_work);
7123 struct sk_buff *skb;
7125 BT_DBG("");
7127 while ((skb = skb_dequeue(&conn->pending_rx)))
7128 l2cap_recv_frame(conn, skb);
/* Create (or return the existing) l2cap_conn for an HCI connection.
 *
 * Allocates the connection state, binds it to a new HCI channel, picks
 * the MTU from the link type (LE MTU when the controller reports one,
 * ACL MTU otherwise), advertises the locally supported fixed channels
 * (A2MP when high speed is enabled, BR/EDR SMP when supported), and
 * initializes locks, channel list and deferred work.  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up: reuse it */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;

		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
7198 static bool is_valid_psm(u16 psm, u8 dst_type) {
7199 if (!psm)
7200 return false;
7202 if (bdaddr_type_is_le(dst_type))
7203 return (psm <= 0x00ff);
7205 /* PSM must be odd and lsb of upper byte must be 0 */
7206 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP channel connection.
 *
 * Validates the PSM/CID combination for the channel type, checks that
 * the requested mode is usable, creates or reuses the underlying HCI
 * connection (LE direct/scan connect or ACL), attaches the channel to
 * the l2cap_conn and starts the connect procedure (or completes it
 * immediately if the link is already up).
 *
 * Returns 0 on success or when a connect is already in progress, or a
 * negative errno (-EINVAL, -EOPNOTSUPP, -EISCONN, -EBADFD, -EBUSY,
 * -ENOMEM, -EHOSTUNREACH, or the hci_connect_* error).
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may connect without a PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Only allowed unless ERTM was disabled at module load */
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we cannot initiate scanning, so
		 * connect directly; otherwise go through the connect-
		 * via-scan path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Requested destination CID already taken on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* Link already up: finish the connect right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
7369 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7371 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether an incoming ACL connection should be
 * accepted, based on listening L2CAP channels.
 *
 * Channels bound exactly to this adapter's address take precedence over
 * wildcard (BDADDR_ANY) listeners.  Returns the accumulated link-mode
 * flags (HCI_LM_ACCEPT, optionally HCI_LM_MASTER) or 0 to reject.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Exact match on the local adapter address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;

	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
7402 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7403 * from an existing channel in the list or from the beginning of the
7404 * global list (by passing NULL as first parameter).
/* Return the next listening fixed channel matching @hcon, continuing
 * after @c (or from the start of the global list when @c is NULL).
 *
 * A matching channel must be a fixed channel in BT_LISTEN state, bound
 * to the connection's source address (or BDADDR_ANY) with the same
 * source address type.  The returned channel carries a reference that
 * the caller must drop with l2cap_chan_put(); returns NULL when no
 * further match exists.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Reference for the caller; released via l2cap_chan_put() */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;

	read_unlock(&chan_list_lock);

	return NULL;
/* HCI callback invoked when an ACL or LE connection attempt completes.
 *
 * On failure, tears down any L2CAP state for the link.  On success,
 * creates the l2cap_conn, skips blacklisted peers, spawns a channel for
 * every listening fixed channel (client fixed channels with the same
 * CID take precedence over new server instances), then kicks off the
 * connection-ready processing.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference on the current one */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;

	l2cap_conn_ready(conn);
7499 int l2cap_disconn_ind(struct hci_conn *hcon)
7501 struct l2cap_conn *conn = hcon->l2cap_data;
7503 BT_DBG("hcon %p", hcon);
7505 if (!conn)
7506 return HCI_ERROR_REMOTE_USER_TERM;
7507 return conn->disc_reason;
7510 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7512 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7513 return;
7515 BT_DBG("hcon %p reason %d", hcon, reason);
7517 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 *
 * When encryption is dropped: medium-security channels get a grace
 * timer, high/FIPS-security channels are closed immediately.  When
 * encryption comes up, the grace timer for medium security is cleared.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level == BT_SECURITY_FIPS)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
/* HCI callback: a security (authentication/encryption) procedure on
 * @hcon finished with @status and encryption state @encrypt.
 *
 * Walks every channel on the connection and advances its state machine:
 * established channels resume and re-check their encryption policy;
 * channels in BT_CONNECT proceed (or get a disconnect timer) depending
 * on success and encryption key size; channels in BT_CONNECT2 send the
 * pending connect response and, on success, start configuration.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP signaling channel is not subject to link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
			} else {
				/* Security failed or key too short */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right after a successful
			 * response, if not already sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;

		l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);
7628 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7630 struct l2cap_conn *conn = hcon->l2cap_data;
7631 struct l2cap_hdr *hdr;
7632 int len;
7634 /* For AMP controller do not create l2cap conn */
7635 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7636 goto drop;
7638 if (!conn)
7639 conn = l2cap_conn_add(hcon);
7641 if (!conn)
7642 goto drop;
7644 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7646 switch (flags) {
7647 case ACL_START:
7648 case ACL_START_NO_FLUSH:
7649 case ACL_COMPLETE:
7650 if (conn->rx_len) {
7651 BT_ERR("Unexpected start frame (len %d)", skb->len);
7652 kfree_skb(conn->rx_skb);
7653 conn->rx_skb = NULL;
7654 conn->rx_len = 0;
7655 l2cap_conn_unreliable(conn, ECOMM);
7658 /* Start fragment always begin with Basic L2CAP header */
7659 if (skb->len < L2CAP_HDR_SIZE) {
7660 BT_ERR("Frame is too short (len %d)", skb->len);
7661 l2cap_conn_unreliable(conn, ECOMM);
7662 goto drop;
7665 hdr = (struct l2cap_hdr *) skb->data;
7666 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7668 if (len == skb->len) {
7669 /* Complete frame received */
7670 l2cap_recv_frame(conn, skb);
7671 return;
7674 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7676 if (skb->len > len) {
7677 BT_ERR("Frame is too long (len %d, expected len %d)",
7678 skb->len, len);
7679 l2cap_conn_unreliable(conn, ECOMM);
7680 goto drop;
7683 /* Allocate skb for the complete frame (with header) */
7684 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7685 if (!conn->rx_skb)
7686 goto drop;
7688 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7689 skb->len);
7690 conn->rx_len = len - skb->len;
7691 break;
7693 case ACL_CONT:
7694 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7696 if (!conn->rx_len) {
7697 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7698 l2cap_conn_unreliable(conn, ECOMM);
7699 goto drop;
7702 if (skb->len > conn->rx_len) {
7703 BT_ERR("Fragment is too long (len %d, expected %d)",
7704 skb->len, conn->rx_len);
7705 kfree_skb(conn->rx_skb);
7706 conn->rx_skb = NULL;
7707 conn->rx_len = 0;
7708 l2cap_conn_unreliable(conn, ECOMM);
7709 goto drop;
7712 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7713 skb->len);
7714 conn->rx_len -= skb->len;
7716 if (!conn->rx_len) {
7717 /* Complete frame received. l2cap_recv_frame
7718 * takes ownership of the skb so set the global
7719 * rx_skb pointer to NULL first.
7721 struct sk_buff *rx_skb = conn->rx_skb;
7722 conn->rx_skb = NULL;
7723 l2cap_recv_frame(conn, rx_skb);
7725 break;
7728 drop:
7729 kfree_skb(skb);
7732 static struct hci_cb l2cap_cb = {
7733 .name = "L2CAP",
7734 .connect_cfm = l2cap_connect_cfm,
7735 .disconn_cfm = l2cap_disconn_cfm,
7736 .security_cfm = l2cap_security_cfm,
7739 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7741 struct l2cap_chan *c;
7743 read_lock(&chan_list_lock);
7745 list_for_each_entry(c, &chan_list, global_l) {
7746 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7747 &c->src, c->src_type, &c->dst, c->dst_type,
7748 c->state, __le16_to_cpu(c->psm),
7749 c->scid, c->dcid, c->imtu, c->omtu,
7750 c->sec_level, c->mode);
7753 read_unlock(&chan_list_lock);
7755 return 0;
7758 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7760 static struct dentry *l2cap_debugfs;
7762 int __init l2cap_init(void)
7764 int err;
7766 err = l2cap_init_sockets();
7767 if (err < 0)
7768 return err;
7770 hci_register_cb(&l2cap_cb);
7772 if (IS_ERR_OR_NULL(bt_debugfs))
7773 return 0;
7775 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7776 NULL, &l2cap_debugfs_fops);
7778 return 0;
7781 void l2cap_exit(void)
7783 debugfs_remove(l2cap_debugfs);
7784 hci_unregister_cb(&l2cap_cb);
7785 l2cap_cleanup_sockets();
7788 module_param(disable_ertm, bool, 0644);
7789 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");