Merge tag 'locks-v3.16-2' of git://git.samba.org/jlayton/linux
[linux/fpc-iii.git] / net / bluetooth / l2cap_core.c
blob6eabbe05fe54fe8ecc39707a05350439d99ce830
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 #include "6lowpan.h"
45 #define LE_FLOWCTL_MAX_CREDITS 65535
/* Module parameter: when set, ERTM/streaming modes are not offered. */
47 bool disable_ertm;
/* Features advertised in Information Responses (fixed channels + UCD). */
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of all L2CAP channels, guarded by chan_list_lock. */
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
/* Defaults for LE credit-based flow control channels. */
55 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
56 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
/* Forward declarations for helpers defined later in this file. */
58 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
59 u8 code, u8 ident, u16 dlen, void *data);
60 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 void *data);
62 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
63 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
65 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
66 struct sk_buff_head *skbs, u8 event);
68 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
70 if (hcon->type == LE_LINK) {
71 if (type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
73 else
74 return BDADDR_LE_RANDOM;
77 return BDADDR_BREDR;
80 /* ---- L2CAP channels ---- */
/* Find the channel on @conn whose destination CID matches @cid; NULL if
 * none.  The list walk itself is unlocked; the locked wrappers below take
 * conn->chan_lock before calling in.
 */
82 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
83 u16 cid)
85 struct l2cap_chan *c;
87 list_for_each_entry(c, &conn->chan_l, list) {
88 if (c->dcid == cid)
89 return c;
91 return NULL;
/* Find the channel on @conn whose source CID matches @cid; NULL if none. */
94 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
95 u16 cid)
97 struct l2cap_chan *c;
99 list_for_each_entry(c, &conn->chan_l, list) {
100 if (c->scid == cid)
101 return c;
103 return NULL;
106 /* Find channel with given SCID.
107 * Returns locked channel. */
/* NOTE(review): the channel is locked under conn->chan_lock but no
 * reference is taken before returning - confirm callers cannot race
 * with the final l2cap_chan_put() freeing the channel.
 */
108 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
109 u16 cid)
111 struct l2cap_chan *c;
113 mutex_lock(&conn->chan_lock);
114 c = __l2cap_get_chan_by_scid(conn, cid);
115 if (c)
116 l2cap_chan_lock(c);
117 mutex_unlock(&conn->chan_lock);
119 return c;
122 /* Find channel with given DCID.
123 * Returns locked channel.
125 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
126 u16 cid)
128 struct l2cap_chan *c;
130 mutex_lock(&conn->chan_lock);
131 c = __l2cap_get_chan_by_dcid(conn, cid);
132 if (c)
133 l2cap_chan_lock(c);
134 mutex_unlock(&conn->chan_lock);
136 return c;
/* Find the channel whose pending signalling exchange used identifier
 * @ident; NULL if none.  Unlocked walk, see wrapper below.
 */
139 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
140 u8 ident)
142 struct l2cap_chan *c;
144 list_for_each_entry(c, &conn->chan_l, list) {
145 if (c->ident == ident)
146 return c;
148 return NULL;
/* Locked wrapper for __l2cap_get_chan_by_ident(); returns the channel
 * locked, like the SCID/DCID variants above.
 */
151 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
152 u8 ident)
154 struct l2cap_chan *c;
156 mutex_lock(&conn->chan_lock);
157 c = __l2cap_get_chan_by_ident(conn, ident);
158 if (c)
159 l2cap_chan_lock(c);
160 mutex_unlock(&conn->chan_lock);
162 return c;
/* Global lookup: any channel bound to source address @src with source
 * port @psm.  Caller holds chan_list_lock (see l2cap_add_psm below).
 */
165 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
167 struct l2cap_chan *c;
169 list_for_each_entry(c, &chan_list, global_l) {
170 if (c->sport == psm && !bacmp(&c->src, src))
171 return c;
173 return NULL;
/* Bind @chan to PSM @psm on source address @src.  With psm == 0 a free
 * dynamic PSM is allocated instead.  Returns 0 on success, -EADDRINUSE
 * if the requested PSM is taken, -EINVAL if the dynamic range is full.
 */
176 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
178 int err;
180 write_lock(&chan_list_lock);
182 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 err = -EADDRINUSE;
184 goto done;
187 if (psm) {
188 chan->psm = psm;
189 chan->sport = psm;
190 err = 0;
191 } else {
192 u16 p;
/* Dynamic PSMs are odd values scanned upward from 0x1001. */
194 err = -EINVAL;
195 for (p = 0x1001; p < 0x1100; p += 2)
196 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
197 chan->psm = cpu_to_le16(p);
198 chan->sport = cpu_to_le16(p);
199 err = 0;
200 break;
204 done:
205 write_unlock(&chan_list_lock);
206 return err;
/* Record a fixed source CID on @chan (fixed-channel setup).  Always 0. */
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
211 write_lock(&chan_list_lock);
213 chan->scid = scid;
215 write_unlock(&chan_list_lock);
217 return 0;
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
222 u16 cid, dyn_end;
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
226 else
227 dyn_end = L2CAP_CID_DYN_END;
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
231 return cid;
234 return 0;
/* Move @chan to @state and notify the channel owner via its state_change
 * callback (err == 0).
 */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240 state_to_string(state));
242 chan->state = state;
243 chan->ops->state_change(chan, state, 0);
/* As above but also propagates an error code to the owner. */
246 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
247 int state, int err)
249 chan->state = state;
250 chan->ops->state_change(chan, chan->state, err);
/* Report an error without changing the channel state. */
253 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
255 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, unless the monitor timer is already
 * pending or no retransmission timeout is configured.
 */
258 static void __set_retrans_timer(struct l2cap_chan *chan)
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; the retransmission timer is cancelled
 * first since the two are mutually exclusive.
 */
267 static void __set_monitor_timer(struct l2cap_chan *chan)
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
277 u16 seq)
279 struct sk_buff *skb;
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
283 return skb;
286 return NULL;
289 /* ---- L2CAP sequence number lists ---- */
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
297 * allocs or frees.
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
302 size_t alloc_size, i;
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
308 alloc_size = roundup_pow_of_two(size);
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311 if (!seq_list->list)
312 return -ENOMEM;
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
320 return 0;
/* Release the backing array allocated by l2cap_seq_list_init(). */
323 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
325 kfree(seq_list->list)
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
329 u16 seq)
331 /* Constant-time check for list membership */
332 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the sequence number at the head of the list.  The
 * slot's stored value is the next element (or L2CAP_SEQ_LIST_TAIL when
 * the popped entry was the last one, in which case the list resets to
 * the empty state).
 */
335 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
337 u16 seq = seq_list->head;
338 u16 mask = seq_list->mask;
340 seq_list->head = seq_list->list[seq & mask];
341 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
343 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
348 return seq;
/* Empty the list; no-op if already empty. */
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
353 u16 i;
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
356 return;
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail; silently ignored when the value is already
 * a member (its slot is not CLEAR).  The tail slot always stores
 * L2CAP_SEQ_LIST_TAIL as the end-of-list marker consumed by pop().
 */
365 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
367 u16 mask = seq_list->mask;
369 /* All appends happen in constant time */
371 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
372 return;
374 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375 seq_list->head = seq;
376 else
377 seq_list->list[seq_list->tail & mask] = seq;
379 seq_list->tail = seq;
380 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop the timer's channel
 * reference.  Takes conn->chan_lock and the channel lock, mirroring the
 * locking order used elsewhere in this file.
 */
383 static void l2cap_chan_timeout(struct work_struct *work)
385 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
386 chan_timer.work);
387 struct l2cap_conn *conn = chan->conn;
388 int reason;
390 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
392 mutex_lock(&conn->chan_lock);
393 l2cap_chan_lock(chan);
/* Connected/configuring channels report refused; a connect attempt past
 * the SDP security level likewise; everything else is a plain timeout.
 */
395 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
396 reason = ECONNREFUSED;
397 else if (chan->state == BT_CONNECT &&
398 chan->sec_level != BT_SECURITY_SDP)
399 reason = ECONNREFUSED;
400 else
401 reason = ETIMEDOUT;
403 l2cap_chan_close(chan, reason);
405 l2cap_chan_unlock(chan);
407 chan->ops->close(chan);
408 mutex_unlock(&conn->chan_lock);
410 l2cap_chan_put(chan);
/* Allocate a new channel, register it on the global list and initialise
 * its timer, refcount and state.  Returns NULL on allocation failure.
 * NOTE(review): uses GFP_ATOMIC - confirm no caller actually needs
 * atomic context; GFP_KERNEL would be preferable otherwise.
 */
413 struct l2cap_chan *l2cap_chan_create(void)
415 struct l2cap_chan *chan;
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
418 if (!chan)
419 return NULL;
421 mutex_init(&chan->lock);
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
429 chan->state = BT_OPEN;
431 kref_init(&chan->kref);
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
436 BT_DBG("chan %p", chan);
438 return chan;
/* kref release callback: unlink from the global list and free. */
441 static void l2cap_chan_destroy(struct kref *kref)
443 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
445 BT_DBG("chan %p", chan);
447 write_lock(&chan_list_lock);
448 list_del(&chan->global_l);
449 write_unlock(&chan_list_lock);
451 kfree(chan);
/* Take a reference on @c. */
454 void l2cap_chan_hold(struct l2cap_chan *c)
456 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
458 kref_get(&c->kref);
/* Drop a reference on @c; frees it via l2cap_chan_destroy() at zero. */
461 void l2cap_chan_put(struct l2cap_chan *c)
463 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
465 kref_put(&c->kref, l2cap_chan_destroy);
/* Reset @chan's negotiable parameters (FCS, windows, timeouts, security)
 * to the spec defaults and clear all configuration-state flags.
 */
468 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
470 chan->fcs = L2CAP_FCS_CRC16;
471 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
474 chan->remote_max_tx = chan->max_tx;
475 chan->remote_tx_win = chan->tx_win;
476 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
477 chan->sec_level = BT_SECURITY_LOW;
478 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
479 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
480 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
481 chan->conf_state = 0;
483 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Initialise LE credit-based flow-control state: no SDU in reassembly,
 * no TX credits until granted by the peer, module-default RX credits,
 * and an MPS capped by the channel's incoming MTU.
 */
486 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
488 chan->sdu = NULL;
489 chan->sdu_last_frag = NULL;
490 chan->sdu_len = 0;
491 chan->tx_credits = 0;
492 chan->rx_credits = le_max_credits;
493 chan->mps = min_t(u16, chan->imtu, le_default_mps);
495 skb_queue_head_init(&chan->tx_q);
/* Attach @chan to @conn: assign CIDs/MTU per channel type, set default
 * extended-flow-spec values, take channel and hci_conn references and
 * link the channel onto the connection's list.  Caller must hold
 * conn->chan_lock (see l2cap_chan_add() below).
 */
498 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
500 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
501 __le16_to_cpu(chan->psm), chan->dcid);
/* Any new channel resets the pending disconnect reason. */
503 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
505 chan->conn = conn;
507 switch (chan->chan_type) {
508 case L2CAP_CHAN_CONN_ORIENTED:
509 /* Alloc CID for connection-oriented socket */
510 chan->scid = l2cap_alloc_cid(conn);
511 if (conn->hcon->type == ACL_LINK)
512 chan->omtu = L2CAP_DEFAULT_MTU;
513 break;
515 case L2CAP_CHAN_CONN_LESS:
516 /* Connectionless socket */
517 chan->scid = L2CAP_CID_CONN_LESS;
518 chan->dcid = L2CAP_CID_CONN_LESS;
519 chan->omtu = L2CAP_DEFAULT_MTU;
520 break;
522 case L2CAP_CHAN_FIXED:
523 /* Caller will set CID and CID specific MTU values */
524 break;
526 default:
527 /* Raw socket can send/recv signalling messages only */
528 chan->scid = L2CAP_CID_SIGNALING;
529 chan->dcid = L2CAP_CID_SIGNALING;
530 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort extended flow specification. */
533 chan->local_id = L2CAP_BESTEFFORT_ID;
534 chan->local_stype = L2CAP_SERV_BESTEFFORT;
535 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
536 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
537 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
538 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* References dropped again in l2cap_chan_del(). */
540 l2cap_chan_hold(chan);
542 hci_conn_hold(conn->hcon);
544 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
547 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
549 mutex_lock(&conn->chan_lock);
550 __l2cap_chan_add(conn, chan);
551 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down with error @err:
 * unlink it, drop the references taken in __l2cap_chan_add(), sever any
 * AMP logical link, notify the owner, and purge mode-specific queues and
 * timers unless configuration never completed.
 */
554 void l2cap_chan_del(struct l2cap_chan *chan, int err)
556 struct l2cap_conn *conn = chan->conn;
558 __clear_chan_timer(chan);
560 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
562 if (conn) {
563 struct amp_mgr *mgr = conn->hcon->amp_mgr;
564 /* Delete from channel list */
565 list_del(&chan->list);
567 l2cap_chan_put(chan);
569 chan->conn = NULL;
/* The A2MP fixed channel never took an hci_conn reference. */
571 if (chan->scid != L2CAP_CID_A2MP)
572 hci_conn_drop(conn->hcon);
574 if (mgr && mgr->bredr_chan == chan)
575 mgr->bredr_chan = NULL;
578 if (chan->hs_hchan) {
579 struct hci_chan *hs_hchan = chan->hs_hchan;
581 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
582 amp_disconnect_logical_link(hs_hchan);
585 chan->ops->teardown(chan, err);
/* Nothing mode-specific was ever set up if config never finished. */
587 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
588 return;
590 switch(chan->mode) {
591 case L2CAP_MODE_BASIC:
592 break;
594 case L2CAP_MODE_LE_FLOWCTL:
595 skb_queue_purge(&chan->tx_q);
596 break;
/* ERTM additionally clears its timers, SREJ queue and seq lists. */
598 case L2CAP_MODE_ERTM:
599 __clear_retrans_timer(chan);
600 __clear_monitor_timer(chan);
601 __clear_ack_timer(chan);
603 skb_queue_purge(&chan->srej_q);
605 l2cap_seq_list_free(&chan->srej_list);
606 l2cap_seq_list_free(&chan->retrans_list);
608 /* fall through */
610 case L2CAP_MODE_STREAMING:
611 skb_queue_purge(&chan->tx_q);
612 break;
615 return;
/* Propagate an identity-address change on @hcon to the destination
 * address of every channel on the connection.
 */
618 void l2cap_conn_update_id_addr(struct hci_conn *hcon)
620 struct l2cap_conn *conn = hcon->l2cap_data;
621 struct l2cap_chan *chan;
623 mutex_lock(&conn->chan_lock);
625 list_for_each_entry(chan, &conn->chan_l, list) {
626 l2cap_chan_lock(chan);
627 bacpy(&chan->dst, &hcon->dst);
628 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
629 l2cap_chan_unlock(chan);
632 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request: authorization
 * failure when deferred setup was in use, bad-PSM otherwise.  Moves the
 * channel to BT_DISCONN and answers the stored request identifier.
 */
635 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
637 struct l2cap_conn *conn = chan->conn;
638 struct l2cap_le_conn_rsp rsp;
639 u16 result;
641 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
642 result = L2CAP_CR_AUTHORIZATION;
643 else
644 result = L2CAP_CR_BAD_PSM;
646 l2cap_state_change(chan, BT_DISCONN);
648 rsp.dcid = cpu_to_le16(chan->scid);
649 rsp.mtu = cpu_to_le16(chan->imtu);
650 rsp.mps = cpu_to_le16(chan->mps);
651 rsp.credits = cpu_to_le16(chan->rx_credits);
652 rsp.result = cpu_to_le16(result);
654 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
655 &rsp);
/* BR/EDR counterpart: reject with security-block (deferred setup) or
 * bad-PSM, status "no further information".
 */
658 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
660 struct l2cap_conn *conn = chan->conn;
661 struct l2cap_conn_rsp rsp;
662 u16 result;
664 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
665 result = L2CAP_CR_SEC_BLOCK;
666 else
667 result = L2CAP_CR_BAD_PSM;
669 l2cap_state_change(chan, BT_DISCONN);
671 rsp.scid = cpu_to_le16(chan->dcid);
672 rsp.dcid = cpu_to_le16(chan->scid);
673 rsp.result = cpu_to_le16(result);
674 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
676 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close @chan with error @reason, choosing the shutdown path from its
 * current state: a disconnect request for established connection-
 * oriented channels, a connect-reject for half-open incoming ones,
 * immediate deletion or plain teardown otherwise.
 */
679 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
681 struct l2cap_conn *conn = chan->conn;
683 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
685 switch (chan->state) {
686 case BT_LISTEN:
687 chan->ops->teardown(chan, 0);
688 break;
690 case BT_CONNECTED:
691 case BT_CONFIG:
692 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
/* Arm the channel timer so an unanswered disconnect req times out. */
693 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
694 l2cap_send_disconn_req(chan, reason);
695 } else
696 l2cap_chan_del(chan, reason);
697 break;
/* Incoming connection still pending our response: reject it first. */
699 case BT_CONNECT2:
700 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
701 if (conn->hcon->type == ACL_LINK)
702 l2cap_chan_connect_reject(chan);
703 else if (conn->hcon->type == LE_LINK)
704 l2cap_chan_le_connect_reject(chan);
707 l2cap_chan_del(chan, reason);
708 break;
710 case BT_CONNECT:
711 case BT_DISCONN:
712 l2cap_chan_del(chan, reason);
713 break;
715 default:
716 chan->ops->teardown(chan, 0);
717 break;
/* Translate the channel type, PSM and requested security level into the
 * HCI authentication requirement used when securing the link.  SDP and
 * 3DSP channels are special-cased: they never require bonding, and a
 * LOW security request on them is downgraded to the SDP level.
 */
721 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
723 switch (chan->chan_type) {
724 case L2CAP_CHAN_RAW:
725 switch (chan->sec_level) {
726 case BT_SECURITY_HIGH:
727 case BT_SECURITY_FIPS:
728 return HCI_AT_DEDICATED_BONDING_MITM;
729 case BT_SECURITY_MEDIUM:
730 return HCI_AT_DEDICATED_BONDING;
731 default:
732 return HCI_AT_NO_BONDING;
734 break;
735 case L2CAP_CHAN_CONN_LESS:
736 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
737 if (chan->sec_level == BT_SECURITY_LOW)
738 chan->sec_level = BT_SECURITY_SDP;
740 if (chan->sec_level == BT_SECURITY_HIGH ||
741 chan->sec_level == BT_SECURITY_FIPS)
742 return HCI_AT_NO_BONDING_MITM;
743 else
744 return HCI_AT_NO_BONDING;
745 break;
746 case L2CAP_CHAN_CONN_ORIENTED:
747 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
748 if (chan->sec_level == BT_SECURITY_LOW)
749 chan->sec_level = BT_SECURITY_SDP;
751 if (chan->sec_level == BT_SECURITY_HIGH ||
752 chan->sec_level == BT_SECURITY_FIPS)
753 return HCI_AT_NO_BONDING_MITM;
754 else
755 return HCI_AT_NO_BONDING;
/* Non-SDP connection-oriented channels use general bonding below. */
757 /* fall through */
758 default:
759 switch (chan->sec_level) {
760 case BT_SECURITY_HIGH:
761 case BT_SECURITY_FIPS:
762 return HCI_AT_GENERAL_BONDING_MITM;
763 case BT_SECURITY_MEDIUM:
764 return HCI_AT_GENERAL_BONDING;
765 default:
766 return HCI_AT_NO_BONDING;
768 break;
772 /* Service level security */
/* Enforce the channel's security level on its link: SMP for LE links,
 * HCI authentication/encryption for BR/EDR.  Returns non-zero when the
 * link already satisfies the requirement.
 */
773 int l2cap_chan_check_security(struct l2cap_chan *chan)
775 struct l2cap_conn *conn = chan->conn;
776 __u8 auth_type;
778 if (conn->hcon->type == LE_LINK)
779 return smp_conn_security(conn->hcon, chan->sec_level);
781 auth_type = l2cap_get_auth_type(chan);
783 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn, cycling
 * through 1..128 under conn->lock.
 */
786 static u8 l2cap_get_ident(struct l2cap_conn *conn)
788 u8 id;
790 /* Get next available identificator.
791 * 1 - 128 are used by kernel.
792 * 792 - 199 are reserved.
793 * 200 - 254 are used by utilities like l2ping, etc.
796 spin_lock(&conn->lock);
798 if (++conn->tx_ident > 128)
799 conn->tx_ident = 1;
801 id = conn->tx_ident;
803 spin_unlock(&conn->lock);
805 return id;
/* Build a signalling command (code/ident/payload) and queue it on the
 * connection's HCI channel at maximum priority.  Silently drops the
 * command if the skb cannot be built.
 */
808 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
809 void *data)
811 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
812 u8 flags;
814 BT_DBG("code 0x%2.2x", code);
816 if (!skb)
817 return;
/* Signalling must not be flushed on controllers that support it. */
819 if (lmp_no_flush_capable(conn->hcon->hdev))
820 flags = ACL_START_NO_FLUSH;
821 else
822 flags = ACL_START;
824 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
825 skb->priority = HCI_PRIO_MAX;
827 hci_send_acl(conn->hchan, skb, flags);
830 static bool __chan_is_moving(struct l2cap_chan *chan)
832 return chan->move_state != L2CAP_MOVE_STABLE &&
833 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit @skb on @chan, routing it over the AMP (high-speed) link when
 * one is attached and no move is in progress, otherwise over the BR/EDR
 * ACL link with flush/force-active flags derived from the channel.
 */
836 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
838 struct hci_conn *hcon = chan->conn->hcon;
839 u16 flags;
841 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
842 skb->priority);
844 if (chan->hs_hcon && !__chan_is_moving(chan)) {
845 if (chan->hs_hchan)
846 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
847 else
/* AMP link gone: the frame cannot be delivered, drop it. */
848 kfree_skb(skb);
850 return;
853 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
854 lmp_no_flush_capable(hcon->hdev))
855 flags = ACL_START_NO_FLUSH;
856 else
857 flags = ACL_START;
859 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
860 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into @control.  S-frames
 * carry poll/supervise bits; I-frames carry SAR/txseq; the unused half
 * is zeroed so stale values never leak between frame types.
 */
863 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
865 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
866 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
868 if (enh & L2CAP_CTRL_FRAME_TYPE) {
869 /* S-Frame */
870 control->sframe = 1;
871 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
872 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
874 control->sar = 0;
875 control->txseq = 0;
876 } else {
877 /* I-Frame */
878 control->sframe = 0;
879 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
880 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
882 control->poll = 0;
883 control->super = 0;
/* Same decoding for the 32-bit extended control field. */
887 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
889 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
890 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
892 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
893 /* S-Frame */
894 control->sframe = 1;
895 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
896 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
898 control->sar = 0;
899 control->txseq = 0;
900 } else {
901 /* I-Frame */
902 control->sframe = 0;
903 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
904 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
906 control->poll = 0;
907 control->super = 0;
/* Strip and decode the control field at the head of @skb, choosing the
 * extended or enhanced layout from the channel's FLAG_EXT_CTRL.
 */
911 static inline void __unpack_control(struct l2cap_chan *chan,
912 struct sk_buff *skb)
914 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
915 __unpack_extended_control(get_unaligned_le32(skb->data),
916 &bt_cb(skb)->control);
917 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
918 } else {
919 __unpack_enhanced_control(get_unaligned_le16(skb->data),
920 &bt_cb(skb)->control);
921 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
925 static u32 __pack_extended_control(struct l2cap_ctrl *control)
927 u32 packed;
929 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
930 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
932 if (control->sframe) {
933 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
934 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
935 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
936 } else {
937 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
938 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
941 return packed;
944 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
946 u16 packed;
948 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
949 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
951 if (control->sframe) {
952 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
953 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
954 packed |= L2CAP_CTRL_FRAME_TYPE;
955 } else {
956 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
957 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
960 return packed;
/* Write the packed control field into @skb just after the basic L2CAP
 * header, using the layout selected by the channel's FLAG_EXT_CTRL.
 */
963 static inline void __pack_control(struct l2cap_chan *chan,
964 struct l2cap_ctrl *control,
965 struct sk_buff *skb)
967 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
968 put_unaligned_le32(__pack_extended_control(control),
969 skb->data + L2CAP_HDR_SIZE);
970 } else {
971 put_unaligned_le16(__pack_enhanced_control(control),
972 skb->data + L2CAP_HDR_SIZE);
976 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
978 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
979 return L2CAP_EXT_HDR_SIZE;
980 else
981 return L2CAP_ENH_HDR_SIZE;
/* Build an ERTM S-frame PDU carrying @control: basic header, packed
 * control field, optional CRC16 FCS over the whole frame.  Returns an
 * ERR_PTR on allocation failure.
 */
984 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
985 u32 control)
987 struct sk_buff *skb;
988 struct l2cap_hdr *lh;
989 int hlen = __ertm_hdr_size(chan);
991 if (chan->fcs == L2CAP_FCS_CRC16)
992 hlen += L2CAP_FCS_SIZE;
994 skb = bt_skb_alloc(hlen, GFP_KERNEL);
996 if (!skb)
997 return ERR_PTR(-ENOMEM);
999 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1000 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1001 lh->cid = cpu_to_le16(chan->dcid);
1003 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1004 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1005 else
1006 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers everything written so far (header + control). */
1008 if (chan->fcs == L2CAP_FCS_CRC16) {
1009 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1010 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1013 skb->priority = HCI_PRIO_MAX;
1014 return skb;
/* Send the supervisory frame described by @control, updating the
 * channel's F-bit, RNR-sent and last-acked bookkeeping along the way.
 * No-op for I-frame controls or while an AMP move is in progress.
 */
1017 static void l2cap_send_sframe(struct l2cap_chan *chan,
1018 struct l2cap_ctrl *control)
1020 struct sk_buff *skb;
1021 u32 control_field;
1023 BT_DBG("chan %p, control %p", chan, control)
1025 if (!control->sframe)
1026 return;
1028 if (__chan_is_moving(chan))
1029 return;
/* A pending F-bit is carried on the next non-poll S-frame. */
1031 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1032 !control->poll)
1033 control->final = 1;
1035 if (control->super == L2CAP_SUPER_RR)
1036 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1037 else if (control->super == L2CAP_SUPER_RNR)
1038 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR/REJ acknowledge reqseq, so the ack timer can be stopped. */
1040 if (control->super != L2CAP_SUPER_SREJ) {
1041 chan->last_acked_seq = control->reqseq;
1042 __clear_ack_timer(chan);
1045 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1046 control->final, control->poll, control->super);
1048 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1049 control_field = __pack_extended_control(control);
1050 else
1051 control_field = __pack_enhanced_control(control);
1053 skb = l2cap_create_sframe_pdu(chan, control_field);
1054 if (!IS_ERR(skb))
1055 l2cap_do_send(chan, skb);
/* Send an RR, or an RNR when the local receiver is busy, with the
 * requested poll bit and the current buffer_seq as the acknowledgement.
 */
1058 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1060 struct l2cap_ctrl control;
1062 BT_DBG("chan %p, poll %d", chan, poll);
1064 memset(&control, 0, sizeof(control));
1065 control.sframe = 1;
1066 control.poll = poll;
1068 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1069 control.super = L2CAP_SUPER_RNR;
1070 else
1071 control.super = L2CAP_SUPER_RR;
1073 control.reqseq = chan->buffer_seq;
1074 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for @chan. */
1077 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1079 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether @chan may be created on an AMP controller: high speed
 * must be enabled, the peer must support the A2MP fixed channel, at
 * least one non-BR/EDR AMP controller must be up, and the channel policy
 * must prefer AMP.
 */
1082 static bool __amp_capable(struct l2cap_chan *chan)
1084 struct l2cap_conn *conn = chan->conn;
1085 struct hci_dev *hdev;
1086 bool amp_available = false;
1088 if (!conn->hs_enabled)
1089 return false;
1091 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1092 return false;
1094 read_lock(&hci_dev_list_lock);
1095 list_for_each_entry(hdev, &hci_dev_list, list) {
1096 if (hdev->amp_type != AMP_TYPE_BREDR &&
1097 test_bit(HCI_UP, &hdev->flags)) {
1098 amp_available = true;
1099 break;
1102 read_unlock(&hci_dev_list_lock);
1104 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1105 return amp_available;
1107 return false;
/* Placeholder: extended flow spec validation is not implemented yet. */
1110 static bool l2cap_check_efs(struct l2cap_chan *chan)
1112 /* Check EFS parameters */
1113 return true;
/* Send an L2CAP Connection Request for @chan and mark the connect as
 * pending; the allocated identifier is stored for matching the response.
 */
1116 void l2cap_send_conn_req(struct l2cap_chan *chan)
1118 struct l2cap_conn *conn = chan->conn;
1119 struct l2cap_conn_req req;
1121 req.scid = cpu_to_le16(chan->scid);
1122 req.psm = chan->psm;
1124 chan->ident = l2cap_get_ident(conn);
1126 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1128 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send a Create Channel Request targeting AMP controller @amp_id. */
1131 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1133 struct l2cap_create_chan_req req;
1134 req.scid = cpu_to_le16(chan->scid);
1135 req.psm = chan->psm;
1136 req.amp_id = amp_id;
1138 chan->ident = l2cap_get_ident(chan->conn);
1140 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1141 sizeof(req), &req);
/* Prepare an ERTM channel for an AMP move: stop all timers, flag queued
 * frames for retransmission, reset sequence/SREJ state and park both
 * state machines until the move completes.  No-op for non-ERTM modes.
 */
1144 static void l2cap_move_setup(struct l2cap_chan *chan)
1146 struct sk_buff *skb;
1148 BT_DBG("chan %p", chan);
1150 if (chan->mode != L2CAP_MODE_ERTM)
1151 return;
1153 __clear_retrans_timer(chan);
1154 __clear_monitor_timer(chan);
1155 __clear_ack_timer(chan);
1157 chan->retry_count = 0;
/* Frames already sent once get their retry count reset to 1; the walk
 * stops at the first never-transmitted frame.
 */
1158 skb_queue_walk(&chan->tx_q, skb) {
1159 if (bt_cb(skb)->control.retries)
1160 bt_cb(skb)->control.retries = 1;
1161 else
1162 break;
1165 chan->expected_tx_seq = chan->buffer_seq;
1167 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1168 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1169 l2cap_seq_list_clear(&chan->retrans_list);
1170 l2cap_seq_list_clear(&chan->srej_list);
1171 skb_queue_purge(&chan->srej_q);
1173 chan->tx_state = L2CAP_TX_STATE_XMIT;
1174 chan->rx_state = L2CAP_RX_STATE_MOVE;
1176 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: back to the stable state, then resynchronise the
 * ERTM machines - the initiator polls (P=1) and waits for the F-bit,
 * the responder waits for the peer's poll.
 */
1179 static void l2cap_move_done(struct l2cap_chan *chan)
1181 u8 move_role = chan->move_role;
1182 BT_DBG("chan %p", chan);
1184 chan->move_state = L2CAP_MOVE_STABLE;
1185 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1187 if (chan->mode != L2CAP_MODE_ERTM)
1188 return;
1190 switch (move_role) {
1191 case L2CAP_MOVE_ROLE_INITIATOR:
1192 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1193 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1194 break;
1195 case L2CAP_MOVE_ROLE_RESPONDER:
1196 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1197 break;
/* Mark @chan fully connected: clear config state and the channel timer,
 * suspend an LE flow-control channel that has no TX credits yet, then
 * notify the owner.
 */
1201 static void l2cap_chan_ready(struct l2cap_chan *chan)
1203 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1204 chan->conf_state = 0;
1205 __clear_chan_timer(chan);
1207 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1208 chan->ops->suspend(chan);
1210 chan->state = BT_CONNECTED;
1212 chan->ops->ready(chan);
/* Send an LE credit-based Connection Request, at most once per channel
 * (guarded by FLAG_LE_CONN_REQ_SENT).
 */
1215 static void l2cap_le_connect(struct l2cap_chan *chan)
1217 struct l2cap_conn *conn = chan->conn;
1218 struct l2cap_le_conn_req req;
1220 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1221 return;
1223 req.psm = chan->psm;
1224 req.scid = cpu_to_le16(chan->scid);
1225 req.mtu = cpu_to_le16(chan->imtu);
1226 req.mps = cpu_to_le16(chan->mps);
1227 req.credits = cpu_to_le16(chan->rx_credits);
1229 chan->ident = l2cap_get_ident(conn);
1231 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1232 sizeof(req), &req);
/* LE channel start: enforce security first; channels without a PSM
 * (fixed channels) are ready immediately, otherwise issue the LE
 * connect request once the channel is in BT_CONNECT.
 */
1235 static void l2cap_le_start(struct l2cap_chan *chan)
1237 struct l2cap_conn *conn = chan->conn;
1239 if (!smp_conn_security(conn->hcon, chan->sec_level))
1240 return;
1242 if (!chan->psm) {
1243 l2cap_chan_ready(chan);
1244 return;
1247 if (chan->state == BT_CONNECT)
1248 l2cap_le_connect(chan);
/* Dispatch connection establishment: AMP discovery when the channel
 * prefers AMP, the LE path for LE links, a plain Connection Request
 * otherwise.
 */
1251 static void l2cap_start_connection(struct l2cap_chan *chan)
1253 if (__amp_capable(chan)) {
1254 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1255 a2mp_discover_amp(chan);
1256 } else if (chan->conn->hcon->type == LE_LINK) {
1257 l2cap_le_start(chan);
1258 } else {
1259 l2cap_send_conn_req(chan);
/* Kick off connection establishment for @chan.  On BR/EDR the remote
 * feature mask must be known first: if the Information Request exchange
 * has not completed, (re)issue it and defer; otherwise connect once
 * security is satisfied and no connect is already pending.
 */
1263 static void l2cap_do_start(struct l2cap_chan *chan)
1265 struct l2cap_conn *conn = chan->conn;
1267 if (conn->hcon->type == LE_LINK) {
1268 l2cap_le_start(chan);
1269 return;
1272 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1273 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1274 return;
1276 if (l2cap_chan_check_security(chan) &&
1277 __l2cap_no_conn_pending(chan)) {
1278 l2cap_start_connection(chan);
1280 } else {
1281 struct l2cap_info_req req;
1282 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1284 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1285 conn->info_ident = l2cap_get_ident(conn);
1287 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1289 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1290 sizeof(req), &req);
1294 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1296 u32 local_feat_mask = l2cap_feat_mask;
1297 if (!disable_ertm)
1298 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1300 switch (mode) {
1301 case L2CAP_MODE_ERTM:
1302 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1303 case L2CAP_MODE_STREAMING:
1304 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1305 default:
1306 return 0x00;
/* Initiate channel disconnection: stop ERTM timers, send an L2CAP
 * Disconnection Request and move the channel to BT_DISCONN with @err as
 * the pending socket error. A2MP channels have no dcid on the wire, so
 * only the state change is performed for them.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
/* ---- L2CAP connections ---- */

/* Walk every channel on @conn and advance connection establishment:
 * outgoing channels in BT_CONNECT are (re)started once security and
 * feature-mask requirements are met; incoming channels in BT_CONNECT2
 * get a Connection Response (success, pending, or authentication
 * pending) and, on success, the first Configuration Request.
 * Runs with conn->chan_lock held; each channel is locked individually.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode the remote
			 * does not support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only proceed to configuration on a final success
			 * response and if no conf req was sent yet.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match, locked.
 *
 * An exact src/dst address match wins immediately; otherwise the best
 * wildcard (BDADDR_ANY) match found during the scan is returned.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
/* An LE link just came up: if a listening ATT socket matches this
 * connection (and no client ATT channel already claims the CID, and the
 * remote is not blacklisted), spawn a new server-side channel for it.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	bt_6lowpan_add_conn(conn);

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	l2cap_chan_lock(pchan);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
/* Called when the underlying HCI connection becomes usable. Starts LE
 * security/channel setup where needed, marks connectionless channels
 * ready, kicks pending BT_CONNECT channels, and finally schedules
 * processing of any RX frames queued while the link was coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP manager, not here */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1542 /* Notify sockets that we cannot guaranty reliability anymore */
1543 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1545 struct l2cap_chan *chan;
1547 BT_DBG("conn %p", conn);
1549 mutex_lock(&conn->chan_lock);
1551 list_for_each_entry(chan, &conn->chan_l, list) {
1552 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1553 l2cap_chan_set_err(chan, err);
1556 mutex_unlock(&conn->chan_lock);
/* Information request timed out: pretend the feature-mask exchange is
 * done (with whatever we have) and resume connection establishment for
 * the channels that were waiting on it.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */

/* Register @user on @conn. Returns 0 on success, -EINVAL if @user is
 * already linked, -ENODEV if the connection is already being torn down,
 * or whatever error the user's ->probe callback returns.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-NULL list pointer means the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
/* Unregister @user from @conn and invoke its ->remove callback. Safe to
 * call on an already-unregistered user (detected via NULLed list
 * pointers). Locking mirrors l2cap_register_user().
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* Mark as unregistered so a second unregister is a no-op */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach and ->remove() every registered l2cap_user on @conn. Used
 * during connection teardown; callers provide the necessary locking.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
/* Tear down the L2CAP connection attached to @hcon: flush pending RX,
 * unregister users, close every channel with error @err, release the HCI
 * channel and finally drop the hcon's reference on the l2cap_conn.
 * Relies on the parent hci_conn/hci_dev locking (see l2cap_register_user).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Stop deferred RX processing before the channels go away */
	skb_queue_purge(&conn->pending_rx);
	flush_work(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel survives until ops->close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	/* NULL hchan signals "being deleted" to l2cap_register_user() */
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
/* SMP pairing took too long: destroy the SMP context and drop the whole
 * L2CAP connection with ETIMEDOUT. The test_and_clear guards against
 * racing with normal pairing completion.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
/* kref release callback: drop the hci_conn reference and free the
 * l2cap_conn itself. Invoked from l2cap_conn_put() on last reference.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
/* Take a reference on @conn. Pair each call with l2cap_conn_put(). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * count reaches zero.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 *
 * @link_type filters by transport (BR/EDR vs LE address types). An exact
 * src/dst match returns immediately; otherwise the best wildcard
 * (BDADDR_ANY) candidate seen during the scan is returned.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine. The timer took a channel reference when armed, so it is
 * dropped here on every exit path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection - nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine. Mirrors l2cap_monitor_timeout(), including the
 * channel-reference drop on each exit path.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Transmit all queued frames in streaming mode: stamp each I-frame with
 * the next TX sequence number, append an FCS if negotiated, and send.
 * No acknowledgements are expected in this mode, so nothing is retained.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
/* Transmit as many pending I-frames as the ERTM TX window allows.
 * Each frame is kept on tx_q for possible retransmission; a clone is
 * what actually goes down to the HCI layer. Returns the number of
 * frames sent, 0 if blocked (remote busy, moving, window full), or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back a pending F-bit on this frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges what we have received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
/* Retransmit every sequence number currently on chan->retrans_list.
 * Frames exceeding max_tx retries trigger a disconnect. The stored
 * frame's control field is patched in place (reqseq/F-bit refreshed)
 * before a copy/clone is handed to the HCI layer.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack we piggy-back on the retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* The control field changed, so the FCS must be recomputed */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
/* Retransmit the single frame identified by control->reqseq (SREJ
 * handling): queue it on the retransmit list and flush.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
/* Retransmit every unacked frame starting at control->reqseq (REJ
 * handling): rebuild the retransmit list from tx_q up to the current
 * send head, then flush. A set poll bit means an F-bit must go out with
 * the response.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to the send head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
/* Acknowledge received I-frames. If we are locally busy, send RNR.
 * Otherwise prefer piggy-backing the ack on outgoing I-frames; if none
 * went out and the unacked window is at least 3/4 full, send an explicit
 * RR now, else (re)arm the delayed-ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into a frag_list of continuation skbs
 * sized by the connection MTU. Returns bytes copied or a negative errno
 * (-EFAULT on copy failure, allocator error from ops->alloc_skb).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the PSM,
 * followed by the user payload from @msg. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from @msg. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for
 * the first segment of a segmented SDU (@sdulen != 0), payload, and room
 * reserved for the FCS if negotiated. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
/* Split an SDU from @msg into ERTM/streaming PDUs on @seg_queue, marking
 * each segment's SAR bits (UNSEGMENTED, or START/CONTINUE/END). Returns
 * 0 on success or a negative errno, purging @seg_queue on failure.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		/* Only the START segment carries the total SDU length */
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments regain the SDU-length bytes */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
/* Build an LE credit-based flow control PDU: L2CAP header, an optional
 * SDU-length field for the first segment (@sdulen != 0), then payload.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
/* Split an SDU from @msg into LE flow-control PDUs on @seg_queue. The
 * first PDU always carries the SDU length. Returns 0 on success or a
 * negative errno, purging @seg_queue on failure.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Only the first PDU carries the SDU length */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
/* Send @len bytes from @msg on @chan, dispatching on channel type/mode:
 * connectionless, LE credit-based, basic, or ERTM/streaming. Returns the
 * number of bytes queued/sent or a negative errno (-ENOTCONN, -EMSGSIZE,
 * -EAGAIN when out of LE credits, -EBADFD for an unknown mode).
 * Called with the channel lock held; note that alloc_skb may drop and
 * reacquire it, hence the state rechecks after each PDU build.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many PDUs as the remaining credits allow */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
/* Send an SREJ S-frame for every missing sequence number between the
 * expected txseq and the received @txseq, remembering each one on
 * srej_list, then advance expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames that already arrived out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2583 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2585 struct l2cap_ctrl control;
2587 BT_DBG("chan %p", chan);
2589 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2590 return;
2592 memset(&control, 0, sizeof(control));
2593 control.sframe = 1;
2594 control.super = L2CAP_SUPER_SREJ;
2595 control.reqseq = chan->srej_list.tail;
2596 l2cap_send_sframe(chan, &control);
/* Re-send SREJ S-frames for every pending sequence number on srej_list
 * except @txseq (which just arrived). Each entry is popped, re-sent and
 * re-appended; the captured initial head limits the walk to one pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2625 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2627 struct sk_buff *acked_skb;
2628 u16 ackseq;
2630 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2632 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2633 return;
2635 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2636 chan->expected_ack_seq, chan->unacked_frames);
2638 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2639 ackseq = __next_seq(chan, ackseq)) {
2641 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2642 if (acked_skb) {
2643 skb_unlink(acked_skb, &chan->tx_q);
2644 kfree_skb(acked_skb);
2645 chan->unacked_frames--;
2649 chan->expected_ack_seq = reqseq;
2651 if (chan->unacked_frames == 0)
2652 __clear_retrans_timer(chan);
2654 BT_DBG("unacked_frames %u", chan->unacked_frames);
2657 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2659 BT_DBG("chan %p", chan);
2661 chan->expected_tx_seq = chan->buffer_seq;
2662 l2cap_seq_list_clear(&chan->srej_list);
2663 skb_queue_purge(&chan->srej_q);
2664 chan->rx_state = L2CAP_RX_STATE_RECV;
2667 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2668 struct l2cap_ctrl *control,
2669 struct sk_buff_head *skbs, u8 event)
2671 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2672 event);
2674 switch (event) {
2675 case L2CAP_EV_DATA_REQUEST:
2676 if (chan->tx_send_head == NULL)
2677 chan->tx_send_head = skb_peek(skbs);
2679 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2680 l2cap_ertm_send(chan);
2681 break;
2682 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2683 BT_DBG("Enter LOCAL_BUSY");
2684 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2686 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2687 /* The SREJ_SENT state must be aborted if we are to
2688 * enter the LOCAL_BUSY state.
2690 l2cap_abort_rx_srej_sent(chan);
2693 l2cap_send_ack(chan);
2695 break;
2696 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2697 BT_DBG("Exit LOCAL_BUSY");
2698 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2700 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2701 struct l2cap_ctrl local_control;
2703 memset(&local_control, 0, sizeof(local_control));
2704 local_control.sframe = 1;
2705 local_control.super = L2CAP_SUPER_RR;
2706 local_control.poll = 1;
2707 local_control.reqseq = chan->buffer_seq;
2708 l2cap_send_sframe(chan, &local_control);
2710 chan->retry_count = 1;
2711 __set_monitor_timer(chan);
2712 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2714 break;
2715 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2716 l2cap_process_reqseq(chan, control->reqseq);
2717 break;
2718 case L2CAP_EV_EXPLICIT_POLL:
2719 l2cap_send_rr_or_rnr(chan, 1);
2720 chan->retry_count = 1;
2721 __set_monitor_timer(chan);
2722 __clear_ack_timer(chan);
2723 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2724 break;
2725 case L2CAP_EV_RETRANS_TO:
2726 l2cap_send_rr_or_rnr(chan, 1);
2727 chan->retry_count = 1;
2728 __set_monitor_timer(chan);
2729 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2730 break;
2731 case L2CAP_EV_RECV_FBIT:
2732 /* Nothing to process */
2733 break;
2734 default:
2735 break;
2739 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2740 struct l2cap_ctrl *control,
2741 struct sk_buff_head *skbs, u8 event)
2743 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2744 event);
2746 switch (event) {
2747 case L2CAP_EV_DATA_REQUEST:
2748 if (chan->tx_send_head == NULL)
2749 chan->tx_send_head = skb_peek(skbs);
2750 /* Queue data, but don't send. */
2751 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2752 break;
2753 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2754 BT_DBG("Enter LOCAL_BUSY");
2755 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2757 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2758 /* The SREJ_SENT state must be aborted if we are to
2759 * enter the LOCAL_BUSY state.
2761 l2cap_abort_rx_srej_sent(chan);
2764 l2cap_send_ack(chan);
2766 break;
2767 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2768 BT_DBG("Exit LOCAL_BUSY");
2769 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2771 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2772 struct l2cap_ctrl local_control;
2773 memset(&local_control, 0, sizeof(local_control));
2774 local_control.sframe = 1;
2775 local_control.super = L2CAP_SUPER_RR;
2776 local_control.poll = 1;
2777 local_control.reqseq = chan->buffer_seq;
2778 l2cap_send_sframe(chan, &local_control);
2780 chan->retry_count = 1;
2781 __set_monitor_timer(chan);
2782 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2784 break;
2785 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2786 l2cap_process_reqseq(chan, control->reqseq);
2788 /* Fall through */
2790 case L2CAP_EV_RECV_FBIT:
2791 if (control && control->final) {
2792 __clear_monitor_timer(chan);
2793 if (chan->unacked_frames > 0)
2794 __set_retrans_timer(chan);
2795 chan->retry_count = 0;
2796 chan->tx_state = L2CAP_TX_STATE_XMIT;
2797 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2799 break;
2800 case L2CAP_EV_EXPLICIT_POLL:
2801 /* Ignore */
2802 break;
2803 case L2CAP_EV_MONITOR_TO:
2804 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2805 l2cap_send_rr_or_rnr(chan, 1);
2806 __set_monitor_timer(chan);
2807 chan->retry_count++;
2808 } else {
2809 l2cap_send_disconn_req(chan, ECONNABORTED);
2811 break;
2812 default:
2813 break;
2817 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2818 struct sk_buff_head *skbs, u8 event)
2820 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2821 chan, control, skbs, event, chan->tx_state);
2823 switch (chan->tx_state) {
2824 case L2CAP_TX_STATE_XMIT:
2825 l2cap_tx_state_xmit(chan, control, skbs, event);
2826 break;
2827 case L2CAP_TX_STATE_WAIT_F:
2828 l2cap_tx_state_wait_f(chan, control, skbs, event);
2829 break;
2830 default:
2831 /* Ignore event */
2832 break;
2836 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2837 struct l2cap_ctrl *control)
2839 BT_DBG("chan %p, control %p", chan, control);
2840 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2843 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2844 struct l2cap_ctrl *control)
2846 BT_DBG("chan %p, control %p", chan, control);
2847 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2850 /* Copy frame to all raw sockets on that connection */
2851 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2853 struct sk_buff *nskb;
2854 struct l2cap_chan *chan;
2856 BT_DBG("conn %p", conn);
2858 mutex_lock(&conn->chan_lock);
2860 list_for_each_entry(chan, &conn->chan_l, list) {
2861 if (chan->chan_type != L2CAP_CHAN_RAW)
2862 continue;
2864 /* Don't send frame to the channel it came from */
2865 if (bt_cb(skb)->chan == chan)
2866 continue;
2868 nskb = skb_clone(skb, GFP_KERNEL);
2869 if (!nskb)
2870 continue;
2871 if (chan->ops->recv(chan, nskb))
2872 kfree_skb(nskb);
2875 mutex_unlock(&conn->chan_lock);
2878 /* ---- L2CAP signalling commands ---- */
2879 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2880 u8 ident, u16 dlen, void *data)
2882 struct sk_buff *skb, **frag;
2883 struct l2cap_cmd_hdr *cmd;
2884 struct l2cap_hdr *lh;
2885 int len, count;
2887 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2888 conn, code, ident, dlen);
2890 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2891 return NULL;
2893 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2894 count = min_t(unsigned int, conn->mtu, len);
2896 skb = bt_skb_alloc(count, GFP_KERNEL);
2897 if (!skb)
2898 return NULL;
2900 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2901 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2903 if (conn->hcon->type == LE_LINK)
2904 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2905 else
2906 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2908 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2909 cmd->code = code;
2910 cmd->ident = ident;
2911 cmd->len = cpu_to_le16(dlen);
2913 if (dlen) {
2914 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2915 memcpy(skb_put(skb, count), data, count);
2916 data += count;
2919 len -= skb->len;
2921 /* Continuation fragments (no L2CAP header) */
2922 frag = &skb_shinfo(skb)->frag_list;
2923 while (len) {
2924 count = min_t(unsigned int, conn->mtu, len);
2926 *frag = bt_skb_alloc(count, GFP_KERNEL);
2927 if (!*frag)
2928 goto fail;
2930 memcpy(skb_put(*frag, count), data, count);
2932 len -= count;
2933 data += count;
2935 frag = &(*frag)->next;
2938 return skb;
2940 fail:
2941 kfree_skb(skb);
2942 return NULL;
2945 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2946 unsigned long *val)
2948 struct l2cap_conf_opt *opt = *ptr;
2949 int len;
2951 len = L2CAP_CONF_OPT_SIZE + opt->len;
2952 *ptr += len;
2954 *type = opt->type;
2955 *olen = opt->len;
2957 switch (opt->len) {
2958 case 1:
2959 *val = *((u8 *) opt->val);
2960 break;
2962 case 2:
2963 *val = get_unaligned_le16(opt->val);
2964 break;
2966 case 4:
2967 *val = get_unaligned_le32(opt->val);
2968 break;
2970 default:
2971 *val = (unsigned long) opt->val;
2972 break;
2975 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2976 return len;
2979 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2981 struct l2cap_conf_opt *opt = *ptr;
2983 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2985 opt->type = type;
2986 opt->len = len;
2988 switch (len) {
2989 case 1:
2990 *((u8 *) opt->val) = val;
2991 break;
2993 case 2:
2994 put_unaligned_le16(val, opt->val);
2995 break;
2997 case 4:
2998 put_unaligned_le32(val, opt->val);
2999 break;
3001 default:
3002 memcpy(opt->val, (void *) val, len);
3003 break;
3006 *ptr += L2CAP_CONF_OPT_SIZE + len;
3009 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3011 struct l2cap_conf_efs efs;
3013 switch (chan->mode) {
3014 case L2CAP_MODE_ERTM:
3015 efs.id = chan->local_id;
3016 efs.stype = chan->local_stype;
3017 efs.msdu = cpu_to_le16(chan->local_msdu);
3018 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3019 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3020 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3021 break;
3023 case L2CAP_MODE_STREAMING:
3024 efs.id = 1;
3025 efs.stype = L2CAP_SERV_BESTEFFORT;
3026 efs.msdu = cpu_to_le16(chan->local_msdu);
3027 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3028 efs.acc_lat = 0;
3029 efs.flush_to = 0;
3030 break;
3032 default:
3033 return;
3036 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3037 (unsigned long) &efs);
3040 static void l2cap_ack_timeout(struct work_struct *work)
3042 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3043 ack_timer.work);
3044 u16 frames_to_ack;
3046 BT_DBG("chan %p", chan);
3048 l2cap_chan_lock(chan);
3050 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3051 chan->last_acked_seq);
3053 if (frames_to_ack)
3054 l2cap_send_rr_or_rnr(chan, 0);
3056 l2cap_chan_unlock(chan);
3057 l2cap_chan_put(chan);
3060 int l2cap_ertm_init(struct l2cap_chan *chan)
3062 int err;
3064 chan->next_tx_seq = 0;
3065 chan->expected_tx_seq = 0;
3066 chan->expected_ack_seq = 0;
3067 chan->unacked_frames = 0;
3068 chan->buffer_seq = 0;
3069 chan->frames_sent = 0;
3070 chan->last_acked_seq = 0;
3071 chan->sdu = NULL;
3072 chan->sdu_last_frag = NULL;
3073 chan->sdu_len = 0;
3075 skb_queue_head_init(&chan->tx_q);
3077 chan->local_amp_id = AMP_ID_BREDR;
3078 chan->move_id = AMP_ID_BREDR;
3079 chan->move_state = L2CAP_MOVE_STABLE;
3080 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3082 if (chan->mode != L2CAP_MODE_ERTM)
3083 return 0;
3085 chan->rx_state = L2CAP_RX_STATE_RECV;
3086 chan->tx_state = L2CAP_TX_STATE_XMIT;
3088 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3089 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3090 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3092 skb_queue_head_init(&chan->srej_q);
3094 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3095 if (err < 0)
3096 return err;
3098 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3099 if (err < 0)
3100 l2cap_seq_list_free(&chan->srej_list);
3102 return err;
3105 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3107 switch (mode) {
3108 case L2CAP_MODE_STREAMING:
3109 case L2CAP_MODE_ERTM:
3110 if (l2cap_mode_supported(mode, remote_feat_mask))
3111 return mode;
3112 /* fall through */
3113 default:
3114 return L2CAP_MODE_BASIC;
3118 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3120 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3123 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3125 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3128 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3129 struct l2cap_conf_rfc *rfc)
3131 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3132 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3134 /* Class 1 devices have must have ERTM timeouts
3135 * exceeding the Link Supervision Timeout. The
3136 * default Link Supervision Timeout for AMP
3137 * controllers is 10 seconds.
3139 * Class 1 devices use 0xffffffff for their
3140 * best-effort flush timeout, so the clamping logic
3141 * will result in a timeout that meets the above
3142 * requirement. ERTM timeouts are 16-bit values, so
3143 * the maximum timeout is 65.535 seconds.
3146 /* Convert timeout to milliseconds and round */
3147 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3149 /* This is the recommended formula for class 2 devices
3150 * that start ERTM timers when packets are sent to the
3151 * controller.
3153 ertm_to = 3 * ertm_to + 500;
3155 if (ertm_to > 0xffff)
3156 ertm_to = 0xffff;
3158 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3159 rfc->monitor_timeout = rfc->retrans_timeout;
3160 } else {
3161 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3162 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3166 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3168 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3169 __l2cap_ews_supported(chan->conn)) {
3170 /* use extended control field */
3171 set_bit(FLAG_EXT_CTRL, &chan->flags);
3172 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3173 } else {
3174 chan->tx_win = min_t(u16, chan->tx_win,
3175 L2CAP_DEFAULT_TX_WINDOW);
3176 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3178 chan->ack_win = chan->tx_win;
3181 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3183 struct l2cap_conf_req *req = data;
3184 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3185 void *ptr = req->data;
3186 u16 size;
3188 BT_DBG("chan %p", chan);
3190 if (chan->num_conf_req || chan->num_conf_rsp)
3191 goto done;
3193 switch (chan->mode) {
3194 case L2CAP_MODE_STREAMING:
3195 case L2CAP_MODE_ERTM:
3196 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3197 break;
3199 if (__l2cap_efs_supported(chan->conn))
3200 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3202 /* fall through */
3203 default:
3204 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3205 break;
3208 done:
3209 if (chan->imtu != L2CAP_DEFAULT_MTU)
3210 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3212 switch (chan->mode) {
3213 case L2CAP_MODE_BASIC:
3214 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3215 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3216 break;
3218 rfc.mode = L2CAP_MODE_BASIC;
3219 rfc.txwin_size = 0;
3220 rfc.max_transmit = 0;
3221 rfc.retrans_timeout = 0;
3222 rfc.monitor_timeout = 0;
3223 rfc.max_pdu_size = 0;
3225 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3226 (unsigned long) &rfc);
3227 break;
3229 case L2CAP_MODE_ERTM:
3230 rfc.mode = L2CAP_MODE_ERTM;
3231 rfc.max_transmit = chan->max_tx;
3233 __l2cap_set_ertm_timeouts(chan, &rfc);
3235 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3236 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3237 L2CAP_FCS_SIZE);
3238 rfc.max_pdu_size = cpu_to_le16(size);
3240 l2cap_txwin_setup(chan);
3242 rfc.txwin_size = min_t(u16, chan->tx_win,
3243 L2CAP_DEFAULT_TX_WINDOW);
3245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3246 (unsigned long) &rfc);
3248 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3249 l2cap_add_opt_efs(&ptr, chan);
3251 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3252 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3253 chan->tx_win);
3255 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3256 if (chan->fcs == L2CAP_FCS_NONE ||
3257 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3258 chan->fcs = L2CAP_FCS_NONE;
3259 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3260 chan->fcs);
3262 break;
3264 case L2CAP_MODE_STREAMING:
3265 l2cap_txwin_setup(chan);
3266 rfc.mode = L2CAP_MODE_STREAMING;
3267 rfc.txwin_size = 0;
3268 rfc.max_transmit = 0;
3269 rfc.retrans_timeout = 0;
3270 rfc.monitor_timeout = 0;
3272 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3273 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3274 L2CAP_FCS_SIZE);
3275 rfc.max_pdu_size = cpu_to_le16(size);
3277 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3278 (unsigned long) &rfc);
3280 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3281 l2cap_add_opt_efs(&ptr, chan);
3283 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3284 if (chan->fcs == L2CAP_FCS_NONE ||
3285 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3286 chan->fcs = L2CAP_FCS_NONE;
3287 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3288 chan->fcs);
3290 break;
3293 req->dcid = cpu_to_le16(chan->dcid);
3294 req->flags = cpu_to_le16(0);
3296 return ptr - data;
/* Parse the peer's buffered Configure Request (chan->conf_req) and
 * build the Configure Response into data.  Returns the response length,
 * or a negative errno when the request must be refused outright.
 *
 * Fix: every option value is now validated against its expected length
 * (olen) before use.  opt->len is remote-controlled; previously a short
 * or oversized option would be consumed anyway, letting a malformed
 * request smuggle a pointer value (the default l2cap_get_conf_opt case)
 * into mtu/flush_to/remote_tx_win or leave rfc/efs partially default
 * while still being acted upon.  This mirrors the upstream hardening
 * for CVE-2019-3459/3460.
 *
 * NOTE(review): the response buffer size is not passed in, so output
 * bounds still rely on the caller sizing data generously (cf. the
 * BlueBorne fix upstream, which added an explicit size parameter).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3513 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3514 void *data, u16 *result)
3516 struct l2cap_conf_req *req = data;
3517 void *ptr = req->data;
3518 int type, olen;
3519 unsigned long val;
3520 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3521 struct l2cap_conf_efs efs;
3523 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3525 while (len >= L2CAP_CONF_OPT_SIZE) {
3526 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3528 switch (type) {
3529 case L2CAP_CONF_MTU:
3530 if (val < L2CAP_DEFAULT_MIN_MTU) {
3531 *result = L2CAP_CONF_UNACCEPT;
3532 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3533 } else
3534 chan->imtu = val;
3535 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3536 break;
3538 case L2CAP_CONF_FLUSH_TO:
3539 chan->flush_to = val;
3540 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3541 2, chan->flush_to);
3542 break;
3544 case L2CAP_CONF_RFC:
3545 if (olen == sizeof(rfc))
3546 memcpy(&rfc, (void *)val, olen);
3548 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3549 rfc.mode != chan->mode)
3550 return -ECONNREFUSED;
3552 chan->fcs = 0;
3554 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3555 sizeof(rfc), (unsigned long) &rfc);
3556 break;
3558 case L2CAP_CONF_EWS:
3559 chan->ack_win = min_t(u16, val, chan->ack_win);
3560 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3561 chan->tx_win);
3562 break;
3564 case L2CAP_CONF_EFS:
3565 if (olen == sizeof(efs))
3566 memcpy(&efs, (void *)val, olen);
3568 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3569 efs.stype != L2CAP_SERV_NOTRAFIC &&
3570 efs.stype != chan->local_stype)
3571 return -ECONNREFUSED;
3573 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3574 (unsigned long) &efs);
3575 break;
3577 case L2CAP_CONF_FCS:
3578 if (*result == L2CAP_CONF_PENDING)
3579 if (val == L2CAP_FCS_NONE)
3580 set_bit(CONF_RECV_NO_FCS,
3581 &chan->conf_state);
3582 break;
3586 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3587 return -ECONNREFUSED;
3589 chan->mode = rfc.mode;
3591 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3592 switch (rfc.mode) {
3593 case L2CAP_MODE_ERTM:
3594 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3595 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3596 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3597 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3598 chan->ack_win = min_t(u16, chan->ack_win,
3599 rfc.txwin_size);
3601 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3602 chan->local_msdu = le16_to_cpu(efs.msdu);
3603 chan->local_sdu_itime =
3604 le32_to_cpu(efs.sdu_itime);
3605 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3606 chan->local_flush_to =
3607 le32_to_cpu(efs.flush_to);
3609 break;
3611 case L2CAP_MODE_STREAMING:
3612 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3616 req->dcid = cpu_to_le16(chan->dcid);
3617 req->flags = cpu_to_le16(0);
3619 return ptr - data;
3622 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3623 u16 result, u16 flags)
3625 struct l2cap_conf_rsp *rsp = data;
3626 void *ptr = rsp->data;
3628 BT_DBG("chan %p", chan);
3630 rsp->scid = cpu_to_le16(chan->dcid);
3631 rsp->result = cpu_to_le16(result);
3632 rsp->flags = cpu_to_le16(flags);
3634 return ptr - data;
3637 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3639 struct l2cap_le_conn_rsp rsp;
3640 struct l2cap_conn *conn = chan->conn;
3642 BT_DBG("chan %p", chan);
3644 rsp.dcid = cpu_to_le16(chan->scid);
3645 rsp.mtu = cpu_to_le16(chan->imtu);
3646 rsp.mps = cpu_to_le16(chan->mps);
3647 rsp.credits = cpu_to_le16(chan->rx_credits);
3648 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3650 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3651 &rsp);
3654 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3656 struct l2cap_conn_rsp rsp;
3657 struct l2cap_conn *conn = chan->conn;
3658 u8 buf[128];
3659 u8 rsp_code;
3661 rsp.scid = cpu_to_le16(chan->dcid);
3662 rsp.dcid = cpu_to_le16(chan->scid);
3663 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3664 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3666 if (chan->hs_hcon)
3667 rsp_code = L2CAP_CREATE_CHAN_RSP;
3668 else
3669 rsp_code = L2CAP_CONN_RSP;
3671 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3673 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3675 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3676 return;
3678 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3679 l2cap_build_conf_req(chan, buf), buf);
3680 chan->num_conf_req++;
3683 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3685 int type, olen;
3686 unsigned long val;
3687 /* Use sane default values in case a misbehaving remote device
3688 * did not send an RFC or extended window size option.
3690 u16 txwin_ext = chan->ack_win;
3691 struct l2cap_conf_rfc rfc = {
3692 .mode = chan->mode,
3693 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3694 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3695 .max_pdu_size = cpu_to_le16(chan->imtu),
3696 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3699 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3701 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3702 return;
3704 while (len >= L2CAP_CONF_OPT_SIZE) {
3705 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3707 switch (type) {
3708 case L2CAP_CONF_RFC:
3709 if (olen == sizeof(rfc))
3710 memcpy(&rfc, (void *)val, olen);
3711 break;
3712 case L2CAP_CONF_EWS:
3713 txwin_ext = val;
3714 break;
3718 switch (rfc.mode) {
3719 case L2CAP_MODE_ERTM:
3720 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3721 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3722 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3723 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3724 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3725 else
3726 chan->ack_win = min_t(u16, chan->ack_win,
3727 rfc.txwin_size);
3728 break;
3729 case L2CAP_MODE_STREAMING:
3730 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3734 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3735 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3736 u8 *data)
3738 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3740 if (cmd_len < sizeof(*rej))
3741 return -EPROTO;
3743 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3744 return 0;
3746 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3747 cmd->ident == conn->info_ident) {
3748 cancel_delayed_work(&conn->info_timer);
3750 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3751 conn->info_ident = 0;
3753 l2cap_conn_start(conn);
3756 return 0;
3759 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3760 struct l2cap_cmd_hdr *cmd,
3761 u8 *data, u8 rsp_code, u8 amp_id)
3763 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3764 struct l2cap_conn_rsp rsp;
3765 struct l2cap_chan *chan = NULL, *pchan;
3766 int result, status = L2CAP_CS_NO_INFO;
3768 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3769 __le16 psm = req->psm;
3771 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3773 /* Check if we have socket listening on psm */
3774 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3775 &conn->hcon->dst, ACL_LINK);
3776 if (!pchan) {
3777 result = L2CAP_CR_BAD_PSM;
3778 goto sendresp;
3781 mutex_lock(&conn->chan_lock);
3782 l2cap_chan_lock(pchan);
3784 /* Check if the ACL is secure enough (if not SDP) */
3785 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3786 !hci_conn_check_link_mode(conn->hcon)) {
3787 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3788 result = L2CAP_CR_SEC_BLOCK;
3789 goto response;
3792 result = L2CAP_CR_NO_MEM;
3794 /* Check if we already have channel with that dcid */
3795 if (__l2cap_get_chan_by_dcid(conn, scid))
3796 goto response;
3798 chan = pchan->ops->new_connection(pchan);
3799 if (!chan)
3800 goto response;
3802 /* For certain devices (ex: HID mouse), support for authentication,
3803 * pairing and bonding is optional. For such devices, inorder to avoid
3804 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3805 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3807 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3809 bacpy(&chan->src, &conn->hcon->src);
3810 bacpy(&chan->dst, &conn->hcon->dst);
3811 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3812 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3813 chan->psm = psm;
3814 chan->dcid = scid;
3815 chan->local_amp_id = amp_id;
3817 __l2cap_chan_add(conn, chan);
3819 dcid = chan->scid;
3821 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3823 chan->ident = cmd->ident;
3825 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3826 if (l2cap_chan_check_security(chan)) {
3827 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3828 l2cap_state_change(chan, BT_CONNECT2);
3829 result = L2CAP_CR_PEND;
3830 status = L2CAP_CS_AUTHOR_PEND;
3831 chan->ops->defer(chan);
3832 } else {
3833 /* Force pending result for AMP controllers.
3834 * The connection will succeed after the
3835 * physical link is up.
3837 if (amp_id == AMP_ID_BREDR) {
3838 l2cap_state_change(chan, BT_CONFIG);
3839 result = L2CAP_CR_SUCCESS;
3840 } else {
3841 l2cap_state_change(chan, BT_CONNECT2);
3842 result = L2CAP_CR_PEND;
3844 status = L2CAP_CS_NO_INFO;
3846 } else {
3847 l2cap_state_change(chan, BT_CONNECT2);
3848 result = L2CAP_CR_PEND;
3849 status = L2CAP_CS_AUTHEN_PEND;
3851 } else {
3852 l2cap_state_change(chan, BT_CONNECT2);
3853 result = L2CAP_CR_PEND;
3854 status = L2CAP_CS_NO_INFO;
3857 response:
3858 l2cap_chan_unlock(pchan);
3859 mutex_unlock(&conn->chan_lock);
3861 sendresp:
3862 rsp.scid = cpu_to_le16(scid);
3863 rsp.dcid = cpu_to_le16(dcid);
3864 rsp.result = cpu_to_le16(result);
3865 rsp.status = cpu_to_le16(status);
3866 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3868 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3869 struct l2cap_info_req info;
3870 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3872 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3873 conn->info_ident = l2cap_get_ident(conn);
3875 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3877 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3878 sizeof(info), &info);
3881 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3882 result == L2CAP_CR_SUCCESS) {
3883 u8 buf[128];
3884 set_bit(CONF_REQ_SENT, &chan->conf_state);
3885 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3886 l2cap_build_conf_req(chan, buf), buf);
3887 chan->num_conf_req++;
3890 return chan;
/* Handle an incoming L2CAP Connection Request on a BR/EDR link.
 *
 * Notifies the management interface of the device connection (only once
 * per hci_conn, guarded by HCI_CONN_MGMT_CONNECTED) and then delegates
 * channel creation to l2cap_connect() with amp_id 0 (BR/EDR).
 *
 * Returns 0 on success, -EPROTO if the command payload is too short.
 */
3893 static int l2cap_connect_req(struct l2cap_conn *conn,
3894 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3896 struct hci_dev *hdev = conn->hcon->hdev;
3897 struct hci_conn *hcon = conn->hcon;
3899 if (cmd_len < sizeof(struct l2cap_conn_req))
3900 return -EPROTO;
3902 hci_dev_lock(hdev);
/* test_and_set_bit() ensures mgmt is told at most once per ACL link. */
3903 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3904 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3905 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3906 hcon->dst_type, 0, NULL, 0,
3907 hcon->dev_class);
3908 hci_dev_unlock(hdev);
/* amp_id 0 selects the BR/EDR path inside l2cap_connect(). */
3910 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3911 return 0;
/* Handle an L2CAP Connection Response (or Create Channel Response).
 *
 * Looks the channel up by the source CID if the peer supplied one, else
 * by the identifier of the request we sent.  On success the channel
 * enters BT_CONFIG and the first Configure Request is sent; a pending
 * result just marks the channel as connect-pending; any other result
 * deletes the channel with ECONNREFUSED.
 *
 * Returns 0 on success, -EPROTO on a short command, -EBADSLT when no
 * matching channel is found.
 */
3914 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3915 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3916 u8 *data)
3918 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3919 u16 scid, dcid, result, status;
3920 struct l2cap_chan *chan;
3921 u8 req[128];
3922 int err;
3924 if (cmd_len < sizeof(*rsp))
3925 return -EPROTO;
3927 scid = __le16_to_cpu(rsp->scid);
3928 dcid = __le16_to_cpu(rsp->dcid);
3929 result = __le16_to_cpu(rsp->result);
3930 status = __le16_to_cpu(rsp->status);
3932 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3933 dcid, scid, result, status);
/* chan_lock protects the connection's channel list during lookup. */
3935 mutex_lock(&conn->chan_lock);
3937 if (scid) {
3938 chan = __l2cap_get_chan_by_scid(conn, scid);
3939 if (!chan) {
3940 err = -EBADSLT;
3941 goto unlock;
3943 } else {
/* No scid in the response (e.g. rejection): match on our ident. */
3944 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3945 if (!chan) {
3946 err = -EBADSLT;
3947 goto unlock;
3951 err = 0;
3953 l2cap_chan_lock(chan);
3955 switch (result) {
3956 case L2CAP_CR_SUCCESS:
3957 l2cap_state_change(chan, BT_CONFIG);
3958 chan->ident = 0;
3959 chan->dcid = dcid;
3960 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Avoid sending a duplicate Configure Request. */
3962 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3963 break;
3965 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3966 l2cap_build_conf_req(chan, req), req);
3967 chan->num_conf_req++;
3968 break;
3970 case L2CAP_CR_PEND:
3971 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3972 break;
3974 default:
/* Any other result code means the peer refused the connection. */
3975 l2cap_chan_del(chan, ECONNREFUSED);
3976 break;
3979 l2cap_chan_unlock(chan);
3981 unlock:
3982 mutex_unlock(&conn->chan_lock);
3984 return err;
3987 static inline void set_default_fcs(struct l2cap_chan *chan)
3989 /* FCS is enabled only in ERTM or streaming mode, if one or both
3990 * sides request it.
3992 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3993 chan->fcs = L2CAP_FCS_NONE;
3994 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3995 chan->fcs = L2CAP_FCS_CRC16;
3998 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3999 u8 ident, u16 flags)
4001 struct l2cap_conn *conn = chan->conn;
4003 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4004 flags);
4006 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4007 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4009 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4010 l2cap_build_conf_rsp(chan, data,
4011 L2CAP_CONF_SUCCESS, flags), data);
4014 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4015 u16 scid, u16 dcid)
4017 struct l2cap_cmd_rej_cid rej;
4019 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4020 rej.scid = __cpu_to_le16(scid);
4021 rej.dcid = __cpu_to_le16(dcid);
4023 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming L2CAP Configure Request.
 *
 * Accumulates (possibly fragmented) configuration options into
 * chan->conf_req, and once the final fragment arrives parses them and
 * sends a Configure Response.  If both directions of the configuration
 * handshake are complete the channel is made ready (initializing ERTM
 * state where needed).
 *
 * Returns 0 normally, -EPROTO on a short command, or a negative error
 * from l2cap_ertm_init().
 */
4026 static inline int l2cap_config_req(struct l2cap_conn *conn,
4027 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4028 u8 *data)
4030 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4031 u16 dcid, flags;
4032 u8 rsp[64];
4033 struct l2cap_chan *chan;
4034 int len, err = 0;
4036 if (cmd_len < sizeof(*req))
4037 return -EPROTO;
4039 dcid = __le16_to_cpu(req->dcid);
4040 flags = __le16_to_cpu(req->flags);
4042 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* Returns the channel locked on success. */
4044 chan = l2cap_get_chan_by_scid(conn, dcid);
4045 if (!chan) {
4046 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4047 return 0;
/* Configuration is only legal in BT_CONFIG/BT_CONNECT2. */
4050 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4051 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4052 chan->dcid);
4053 goto unlock;
4056 /* Reject if config buffer is too small. */
4057 len = cmd_len - sizeof(*req);
4058 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4059 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4060 l2cap_build_conf_rsp(chan, rsp,
4061 L2CAP_CONF_REJECT, flags), rsp);
4062 goto unlock;
4065 /* Store config. */
4066 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4067 chan->conf_len += len;
4069 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4070 /* Incomplete config. Send empty response. */
4071 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4072 l2cap_build_conf_rsp(chan, rsp,
4073 L2CAP_CONF_SUCCESS, flags), rsp);
4074 goto unlock;
4077 /* Complete config. */
4078 len = l2cap_parse_conf_req(chan, rsp);
4079 if (len < 0) {
4080 l2cap_send_disconn_req(chan, ECONNRESET);
4081 goto unlock;
4084 chan->ident = cmd->ident;
4085 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4086 chan->num_conf_rsp++;
4088 /* Reset config buffer. */
4089 chan->conf_len = 0;
4091 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4092 goto unlock;
/* Both directions configured: finish channel setup. */
4094 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4095 set_default_fcs(chan);
4097 if (chan->mode == L2CAP_MODE_ERTM ||
4098 chan->mode == L2CAP_MODE_STREAMING)
4099 err = l2cap_ertm_init(chan);
4101 if (err < 0)
4102 l2cap_send_disconn_req(chan, -err);
4103 else
4104 l2cap_chan_ready(chan);
4106 goto unlock;
4109 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4110 u8 buf[64];
4111 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4112 l2cap_build_conf_req(chan, buf), buf);
4113 chan->num_conf_req++;
4116 /* Got Conf Rsp PENDING from remote side and assume we sent
4117 Conf Rsp PENDING in the code above */
4118 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4119 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4121 /* check compatibility */
4123 /* Send rsp for BR/EDR channel */
4124 if (!chan->hs_hcon)
4125 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4126 else
4127 chan->ident = cmd->ident;
4130 unlock:
4131 l2cap_chan_unlock(chan);
4132 return err;
/* Handle an incoming L2CAP Configure Response.
 *
 * On success stores the remote's RFC parameters; on PENDING (EFS flow)
 * re-parses and either answers for BR/EDR or kicks off the AMP logical
 * link; on UNACCEPT retries with adjusted options up to
 * L2CAP_CONF_MAX_CONF_RSP times; any other result disconnects the
 * channel.  When both sides are done the channel is made ready.
 *
 * Returns 0 normally, -EPROTO on a short command, or a negative error
 * from l2cap_ertm_init().
 */
4135 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4136 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4137 u8 *data)
4139 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4140 u16 scid, flags, result;
4141 struct l2cap_chan *chan;
4142 int len = cmd_len - sizeof(*rsp);
4143 int err = 0;
4145 if (cmd_len < sizeof(*rsp))
4146 return -EPROTO;
4148 scid = __le16_to_cpu(rsp->scid);
4149 flags = __le16_to_cpu(rsp->flags);
4150 result = __le16_to_cpu(rsp->result);
4152 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4153 result, len);
/* Returns the channel locked on success. */
4155 chan = l2cap_get_chan_by_scid(conn, scid);
4156 if (!chan)
4157 return 0;
4159 switch (result) {
4160 case L2CAP_CONF_SUCCESS:
4161 l2cap_conf_rfc_get(chan, rsp->data, len);
4162 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4163 break;
4165 case L2CAP_CONF_PENDING:
4166 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4168 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4169 char buf[64];
4171 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4172 buf, &result);
4173 if (len < 0) {
4174 l2cap_send_disconn_req(chan, ECONNRESET);
4175 goto done;
/* BR/EDR can answer now; AMP must first bring the logical link up. */
4178 if (!chan->hs_hcon) {
4179 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4181 } else {
4182 if (l2cap_check_efs(chan)) {
4183 amp_create_logical_link(chan);
4184 chan->ident = cmd->ident;
4188 goto done;
4190 case L2CAP_CONF_UNACCEPT:
4191 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4192 char req[64];
/* Ensure the re-parsed options cannot overrun the local buffer. */
4194 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4195 l2cap_send_disconn_req(chan, ECONNRESET);
4196 goto done;
4199 /* throw out any old stored conf requests */
4200 result = L2CAP_CONF_SUCCESS;
4201 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4202 req, &result);
4203 if (len < 0) {
4204 l2cap_send_disconn_req(chan, ECONNRESET);
4205 goto done;
4208 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4209 L2CAP_CONF_REQ, len, req);
4210 chan->num_conf_req++;
4211 if (result != L2CAP_CONF_SUCCESS)
4212 goto done;
4213 break;
4216 default:
4217 l2cap_chan_set_err(chan, ECONNRESET);
4219 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4220 l2cap_send_disconn_req(chan, ECONNRESET);
4221 goto done;
/* More response fragments still to come. */
4224 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4225 goto done;
4227 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4229 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4230 set_default_fcs(chan);
4232 if (chan->mode == L2CAP_MODE_ERTM ||
4233 chan->mode == L2CAP_MODE_STREAMING)
4234 err = l2cap_ertm_init(chan);
4236 if (err < 0)
4237 l2cap_send_disconn_req(chan, -err);
4238 else
4239 l2cap_chan_ready(chan);
4242 done:
4243 l2cap_chan_unlock(chan);
4244 return err;
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges the request with a Disconnection Response and tears the
 * channel down.  An extra reference is held across l2cap_chan_del() so
 * the ops->close() callback runs on a still-valid channel.
 *
 * Returns 0 normally, -EPROTO if the payload size is wrong.
 */
4247 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4248 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4249 u8 *data)
4251 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4252 struct l2cap_disconn_rsp rsp;
4253 u16 dcid, scid;
4254 struct l2cap_chan *chan;
4256 if (cmd_len != sizeof(*req))
4257 return -EPROTO;
4259 scid = __le16_to_cpu(req->scid);
4260 dcid = __le16_to_cpu(req->dcid);
4262 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4264 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid. */
4266 chan = __l2cap_get_chan_by_scid(conn, dcid);
4267 if (!chan) {
4268 mutex_unlock(&conn->chan_lock);
4269 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4270 return 0;
4273 l2cap_chan_lock(chan);
4275 rsp.dcid = cpu_to_le16(chan->scid);
4276 rsp.scid = cpu_to_le16(chan->dcid);
4277 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4279 chan->ops->set_shutdown(chan);
/* Hold a ref so chan survives until ops->close() below has run. */
4281 l2cap_chan_hold(chan);
4282 l2cap_chan_del(chan, ECONNRESET);
4284 l2cap_chan_unlock(chan);
4286 chan->ops->close(chan);
4287 l2cap_chan_put(chan);
4289 mutex_unlock(&conn->chan_lock);
4291 return 0;
/* Handle an incoming L2CAP Disconnection Response.
 *
 * Completes a disconnect we initiated: the channel is removed from the
 * connection and closed.  Unknown CIDs are silently ignored.
 *
 * Returns 0 normally, -EPROTO if the payload size is wrong.
 */
4294 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4295 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4296 u8 *data)
4298 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4299 u16 dcid, scid;
4300 struct l2cap_chan *chan;
4302 if (cmd_len != sizeof(*rsp))
4303 return -EPROTO;
4305 scid = __le16_to_cpu(rsp->scid);
4306 dcid = __le16_to_cpu(rsp->dcid);
4308 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4310 mutex_lock(&conn->chan_lock);
4312 chan = __l2cap_get_chan_by_scid(conn, scid);
4313 if (!chan) {
4314 mutex_unlock(&conn->chan_lock);
4315 return 0;
4318 l2cap_chan_lock(chan);
/* Hold a ref so chan survives until ops->close() below has run. */
4320 l2cap_chan_hold(chan);
4321 l2cap_chan_del(chan, 0);
4323 l2cap_chan_unlock(chan);
4325 chan->ops->close(chan);
4326 l2cap_chan_put(chan);
4328 mutex_unlock(&conn->chan_lock);
4330 return 0;
4333 static inline int l2cap_information_req(struct l2cap_conn *conn,
4334 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4335 u8 *data)
4337 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4338 u16 type;
4340 if (cmd_len != sizeof(*req))
4341 return -EPROTO;
4343 type = __le16_to_cpu(req->type);
4345 BT_DBG("type 0x%4.4x", type);
4347 if (type == L2CAP_IT_FEAT_MASK) {
4348 u8 buf[8];
4349 u32 feat_mask = l2cap_feat_mask;
4350 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4351 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4352 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4353 if (!disable_ertm)
4354 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4355 | L2CAP_FEAT_FCS;
4356 if (conn->hs_enabled)
4357 feat_mask |= L2CAP_FEAT_EXT_FLOW
4358 | L2CAP_FEAT_EXT_WINDOW;
4360 put_unaligned_le32(feat_mask, rsp->data);
4361 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4362 buf);
4363 } else if (type == L2CAP_IT_FIXED_CHAN) {
4364 u8 buf[12];
4365 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4367 if (conn->hs_enabled)
4368 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4369 else
4370 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4372 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4373 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4374 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4375 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4376 buf);
4377 } else {
4378 struct l2cap_info_rsp rsp;
4379 rsp.type = cpu_to_le16(type);
4380 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4381 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4382 &rsp);
4385 return 0;
/* Handle an incoming L2CAP Information Response.
 *
 * Validates the response against the outstanding request (ident match,
 * request not already completed), records the remote feature mask or
 * fixed channel mask, optionally chains a follow-up fixed-channel
 * request, and restarts pending channel setup via l2cap_conn_start().
 *
 * Returns 0 normally, -EPROTO on a short command.
 */
4388 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4389 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4390 u8 *data)
4392 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4393 u16 type, result;
4395 if (cmd_len < sizeof(*rsp))
4396 return -EPROTO;
4398 type = __le16_to_cpu(rsp->type);
4399 result = __le16_to_cpu(rsp->result);
4401 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4403 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4404 if (cmd->ident != conn->info_ident ||
4405 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4406 return 0;
4408 cancel_delayed_work(&conn->info_timer);
/* A failed query still completes the exchange; proceed without info. */
4410 if (result != L2CAP_IR_SUCCESS) {
4411 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4412 conn->info_ident = 0;
4414 l2cap_conn_start(conn);
4416 return 0;
4419 switch (type) {
4420 case L2CAP_IT_FEAT_MASK:
4421 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, also query its channel map. */
4423 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4424 struct l2cap_info_req req;
4425 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4427 conn->info_ident = l2cap_get_ident(conn);
4429 l2cap_send_cmd(conn, conn->info_ident,
4430 L2CAP_INFO_REQ, sizeof(req), &req);
4431 } else {
4432 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4433 conn->info_ident = 0;
4435 l2cap_conn_start(conn);
4437 break;
4439 case L2CAP_IT_FIXED_CHAN:
4440 conn->fixed_chan_mask = rsp->data[0];
4441 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4442 conn->info_ident = 0;
4444 l2cap_conn_start(conn);
4445 break;
4448 return 0;
/* Handle an incoming L2CAP Create Channel Request (AMP extension).
 *
 * amp_id 0 (AMP_ID_BREDR) is treated like a normal Connection Request.
 * Otherwise the requested AMP controller is validated and, if a channel
 * is created, it is tied to the high-speed link.  Invalid controllers
 * are answered with L2CAP_CR_BAD_AMP.
 *
 * Returns 0 normally, -EPROTO on a bad payload size, -EINVAL if high
 * speed is not enabled for this connection.
 */
4451 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4452 struct l2cap_cmd_hdr *cmd,
4453 u16 cmd_len, void *data)
4455 struct l2cap_create_chan_req *req = data;
4456 struct l2cap_create_chan_rsp rsp;
4457 struct l2cap_chan *chan;
4458 struct hci_dev *hdev;
4459 u16 psm, scid;
4461 if (cmd_len != sizeof(*req))
4462 return -EPROTO;
4464 if (!conn->hs_enabled)
4465 return -EINVAL;
4467 psm = le16_to_cpu(req->psm);
4468 scid = le16_to_cpu(req->scid);
4470 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4472 /* For controller id 0 make BR/EDR connection */
4473 if (req->amp_id == AMP_ID_BREDR) {
4474 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4475 req->amp_id);
4476 return 0;
4479 /* Validate AMP controller id */
4480 hdev = hci_dev_get(req->amp_id);
4481 if (!hdev)
4482 goto error;
/* The controller must be an AMP device and currently up. */
4484 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4485 hci_dev_put(hdev);
4486 goto error;
4489 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4490 req->amp_id);
4491 if (chan) {
4492 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4493 struct hci_conn *hs_hcon;
4495 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4496 &conn->hcon->dst);
4497 if (!hs_hcon) {
4498 hci_dev_put(hdev);
4499 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4500 chan->dcid);
4501 return 0;
4504 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the new channel to the AMP link; FCS is not used over AMP. */
4506 mgr->bredr_chan = chan;
4507 chan->hs_hcon = hs_hcon;
4508 chan->fcs = L2CAP_FCS_NONE;
4509 conn->mtu = hdev->block_mtu;
4512 hci_dev_put(hdev);
4514 return 0;
4516 error:
4517 rsp.dcid = 0;
4518 rsp.scid = cpu_to_le16(scid);
4519 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4520 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4522 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4523 sizeof(rsp), &rsp);
4525 return 0;
4528 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4530 struct l2cap_move_chan_req req;
4531 u8 ident;
4533 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4535 ident = l2cap_get_ident(chan->conn);
4536 chan->ident = ident;
4538 req.icid = cpu_to_le16(chan->scid);
4539 req.dest_amp_id = dest_amp_id;
4541 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4542 &req);
4544 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4547 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4549 struct l2cap_move_chan_rsp rsp;
4551 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4553 rsp.icid = cpu_to_le16(chan->dcid);
4554 rsp.result = cpu_to_le16(result);
4556 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4557 sizeof(rsp), &rsp);
4560 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4562 struct l2cap_move_chan_cfm cfm;
4564 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4566 chan->ident = l2cap_get_ident(chan->conn);
4568 cfm.icid = cpu_to_le16(chan->scid);
4569 cfm.result = cpu_to_le16(result);
4571 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4572 sizeof(cfm), &cfm);
4574 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4577 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4579 struct l2cap_move_chan_cfm cfm;
4581 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4583 cfm.icid = cpu_to_le16(icid);
4584 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4586 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4587 sizeof(cfm), &cfm);
4590 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4591 u16 icid)
4593 struct l2cap_move_chan_cfm_rsp rsp;
4595 BT_DBG("icid 0x%4.4x", icid);
4597 rsp.icid = cpu_to_le16(icid);
4598 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4601 static void __release_logical_link(struct l2cap_chan *chan)
4603 chan->hs_hchan = NULL;
4604 chan->hs_hcon = NULL;
4606 /* Placeholder - release the logical link */
/* Clean up after a failed AMP logical link setup.
 *
 * For a channel still being created, the channel is simply torn down.
 * For a connected channel mid-move, the move is unwound according to
 * our role: a responder reports "not supported", an initiator sends an
 * unconfirmed move confirmation (after l2cap_move_done() if the remote
 * had only reported pending/success so far).
 */
4609 static void l2cap_logical_fail(struct l2cap_chan *chan)
4611 /* Logical link setup failed */
4612 if (chan->state != BT_CONNECTED) {
4613 /* Create channel failure, disconnect */
4614 l2cap_send_disconn_req(chan, ECONNRESET);
4615 return;
4618 switch (chan->move_role) {
4619 case L2CAP_MOVE_ROLE_RESPONDER:
4620 l2cap_move_done(chan);
4621 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4622 break;
4623 case L2CAP_MOVE_ROLE_INITIATOR:
4624 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4625 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4626 /* Remote has only sent pending or
4627 * success responses, clean up
4629 l2cap_move_done(chan);
4632 /* Other amp move states imply that the move
4633 * has already aborted
4635 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4636 break;
/* Complete channel creation once the AMP logical link is up.
 *
 * Binds the logical link to the channel, sends the deferred (EFS)
 * Configure Response using the saved chan->ident and, if the inbound
 * side of configuration is already done, finishes setup by initializing
 * ERTM and marking the channel ready (or disconnecting on error).
 */
4640 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4641 struct hci_chan *hchan)
4643 struct l2cap_conf_rsp rsp;
4645 chan->hs_hchan = hchan;
4646 chan->hs_hcon->l2cap_data = chan->conn;
/* Answer the Configure Request that was deferred until link-up. */
4648 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4650 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4651 int err;
4653 set_default_fcs(chan);
4655 err = l2cap_ertm_init(chan);
4656 if (err < 0)
4657 l2cap_send_disconn_req(chan, -err);
4658 else
4659 l2cap_chan_ready(chan);
/* Advance the channel-move state machine once the AMP logical link for
 * a move is up.  Depending on the current move state and role, either
 * waits for the peer's success response, sends the move confirmation
 * (initiator) or move response (responder), or — for unexpected
 * states — releases the link and returns the channel to stable.
 */
4663 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4664 struct hci_chan *hchan)
4666 chan->hs_hcon = hchan->conn;
4667 chan->hs_hcon->l2cap_data = chan->conn;
4669 BT_DBG("move_state %d", chan->move_state);
4671 switch (chan->move_state) {
4672 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4673 /* Move confirm will be sent after a success
4674 * response is received
4676 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4677 break;
4678 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Defer while the local receiver is busy. */
4679 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4680 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4681 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4682 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4683 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4684 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4685 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4686 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4688 break;
4689 default:
4690 /* Move was not in expected state, free the channel */
4691 __release_logical_link(chan);
4693 chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked */
/* Logical link setup completion callback.
 *
 * On failure, unwinds any in-progress create/move and drops the link
 * pointers.  On success, dispatches to the create-completion path for
 * channels still being set up (AMP only) or to the move-completion
 * path for connected channels.
 */
4698 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4699 u8 status)
4701 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4703 if (status) {
4704 l2cap_logical_fail(chan);
4705 __release_logical_link(chan);
4706 return;
4709 if (chan->state != BT_CONNECTED) {
4710 /* Ignore logical link if channel is on BR/EDR */
4711 if (chan->local_amp_id != AMP_ID_BREDR)
4712 l2cap_logical_finish_create(chan, hchan);
4713 } else {
4714 l2cap_logical_finish_move(chan, hchan);
/* Initiate moving this channel to another controller.
 *
 * A channel currently on BR/EDR is only moved if its policy prefers
 * AMP; the physical link setup step is still a placeholder.  A channel
 * already on AMP is moved back to BR/EDR (move_id 0) by sending a Move
 * Channel Request immediately.
 */
4718 void l2cap_move_start(struct l2cap_chan *chan)
4720 BT_DBG("chan %p", chan);
4722 if (chan->local_amp_id == AMP_ID_BREDR) {
4723 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4724 return;
4725 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4726 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4727 /* Placeholder - start physical link setup */
4728 } else {
4729 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4730 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
/* move_id 0 == BR/EDR destination. */
4731 chan->move_id = 0;
4732 l2cap_move_setup(chan);
4733 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after the AMP physical link attempt.
 *
 * Outgoing channels (BT_CONNECT) either proceed with a Create Channel
 * Request on the AMP or fall back to a plain BR/EDR Connection Request.
 * Incoming channels are answered with a Create Channel Response
 * (success or no-resources) and, on success, move to BT_CONFIG and send
 * the first Configure Request.
 */
4737 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4738 u8 local_amp_id, u8 remote_amp_id)
4740 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4741 local_amp_id, remote_amp_id);
/* FCS is not used on AMP links. */
4743 chan->fcs = L2CAP_FCS_NONE;
4745 /* Outgoing channel on AMP */
4746 if (chan->state == BT_CONNECT) {
4747 if (result == L2CAP_CR_SUCCESS) {
4748 chan->local_amp_id = local_amp_id;
4749 l2cap_send_create_chan_req(chan, remote_amp_id);
4750 } else {
4751 /* Revert to BR/EDR connect */
4752 l2cap_send_conn_req(chan);
4755 return;
4758 /* Incoming channel on AMP */
4759 if (__l2cap_no_conn_pending(chan)) {
4760 struct l2cap_conn_rsp rsp;
4761 char buf[128];
4762 rsp.scid = cpu_to_le16(chan->dcid);
4763 rsp.dcid = cpu_to_le16(chan->scid);
4765 if (result == L2CAP_CR_SUCCESS) {
4766 /* Send successful response */
4767 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4768 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4769 } else {
4770 /* Send negative response */
4771 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4775 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4776 sizeof(rsp), &rsp);
4778 if (result == L2CAP_CR_SUCCESS) {
4779 l2cap_state_change(chan, BT_CONFIG);
4780 set_bit(CONF_REQ_SENT, &chan->conf_state);
4781 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4782 L2CAP_CONF_REQ,
4783 l2cap_build_conf_req(chan, buf), buf);
4784 chan->num_conf_req++;
4789 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4790 u8 remote_amp_id)
4792 l2cap_move_setup(chan);
4793 chan->move_id = local_amp_id;
4794 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4796 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Respond to a channel move as the responder once the physical link
 * outcome is known.  NOTE(review): hchan is currently always NULL —
 * obtaining the hci_chan for the logical link is a placeholder — so
 * only the "not allowed" branch can execute today; the other branches
 * describe the intended behavior once that is implemented.
 */
4799 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4801 struct hci_chan *hchan = NULL;
4803 /* Placeholder - get hci_chan for logical link */
4805 if (hchan) {
4806 if (hchan->state == BT_CONNECTED) {
4807 /* Logical link is ready to go */
4808 chan->hs_hcon = hchan->conn;
4809 chan->hs_hcon->l2cap_data = chan->conn;
4810 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4811 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4813 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4814 } else {
4815 /* Wait for logical link to be ready */
4816 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4818 } else {
4819 /* Logical link not available */
4820 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress channel move.
 *
 * A responder first tells the initiator why (bad controller id maps to
 * L2CAP_MR_BAD_ID, anything else to "not allowed").  The channel then
 * returns to the stable state and ERTM transmission is resumed.
 */
4824 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4826 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4827 u8 rsp_result;
4828 if (result == -EINVAL)
4829 rsp_result = L2CAP_MR_BAD_ID;
4830 else
4831 rsp_result = L2CAP_MR_NOT_ALLOWED;
4833 l2cap_send_move_chan_rsp(chan, rsp_result);
4836 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4837 chan->move_state = L2CAP_MOVE_STABLE;
4839 /* Restart data transmission */
4840 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Physical (AMP) link setup completion callback.
 *
 * NOTE(review): on the BT_DISCONN/BT_CLOSED early-exit path this
 * function unlocks the channel itself, while on all other paths the
 * caller apparently retains the lock — verify against callers before
 * changing the locking here.
 *
 * Channels not yet connected continue through creation; a failed
 * result cancels any move; otherwise the move proceeds according to
 * our role (initiator or responder).
 */
4844 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4846 u8 local_amp_id = chan->local_amp_id;
4847 u8 remote_amp_id = chan->remote_amp_id;
4849 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4850 chan, result, local_amp_id, remote_amp_id);
4852 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4853 l2cap_chan_unlock(chan);
4854 return;
4857 if (chan->state != BT_CONNECTED) {
4858 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4859 } else if (result != L2CAP_MR_SUCCESS) {
4860 l2cap_do_move_cancel(chan, result);
4861 } else {
4862 switch (chan->move_role) {
4863 case L2CAP_MOVE_ROLE_INITIATOR:
4864 l2cap_do_move_initiate(chan, local_amp_id,
4865 remote_amp_id);
4866 break;
4867 case L2CAP_MOVE_ROLE_RESPONDER:
4868 l2cap_do_move_respond(chan, result);
4869 break;
4870 default:
4871 l2cap_do_move_cancel(chan, result);
4872 break;
/* Handle an incoming L2CAP Move Channel Request.
 *
 * Validates that the channel exists, is dynamically allocated, uses
 * ERTM/streaming mode, is policy-allowed to move, and that the target
 * controller is valid and distinct from the current one.  Handles move
 * collisions (larger bd_addr wins), then either accepts a move to
 * BR/EDR directly or reports "pending" while the AMP physical link is
 * prepared.  Always answers with a Move Channel Response.
 *
 * Returns 0 normally, -EPROTO on a bad payload size, -EINVAL if high
 * speed is not enabled.
 */
4877 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4878 struct l2cap_cmd_hdr *cmd,
4879 u16 cmd_len, void *data)
4881 struct l2cap_move_chan_req *req = data;
4882 struct l2cap_move_chan_rsp rsp;
4883 struct l2cap_chan *chan;
4884 u16 icid = 0;
4885 u16 result = L2CAP_MR_NOT_ALLOWED;
4887 if (cmd_len != sizeof(*req))
4888 return -EPROTO;
4890 icid = le16_to_cpu(req->icid);
4892 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4894 if (!conn->hs_enabled)
4895 return -EINVAL;
/* Returns the channel locked on success. */
4897 chan = l2cap_get_chan_by_dcid(conn, icid);
4898 if (!chan) {
4899 rsp.icid = cpu_to_le16(icid);
4900 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4901 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4902 sizeof(rsp), &rsp);
4903 return 0;
/* Save the request ident for the response sent at the end. */
4906 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels without a BR/EDR-only policy
 * may be moved.
 */
4908 if (chan->scid < L2CAP_CID_DYN_START ||
4909 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4910 (chan->mode != L2CAP_MODE_ERTM &&
4911 chan->mode != L2CAP_MODE_STREAMING)) {
4912 result = L2CAP_MR_NOT_ALLOWED;
4913 goto send_move_response;
4916 if (chan->local_amp_id == req->dest_amp_id) {
4917 result = L2CAP_MR_SAME_ID;
4918 goto send_move_response;
4921 if (req->dest_amp_id != AMP_ID_BREDR) {
4922 struct hci_dev *hdev;
4923 hdev = hci_dev_get(req->dest_amp_id);
4924 if (!hdev || hdev->dev_type != HCI_AMP ||
4925 !test_bit(HCI_UP, &hdev->flags)) {
4926 if (hdev)
4927 hci_dev_put(hdev);
4929 result = L2CAP_MR_BAD_ID;
4930 goto send_move_response;
4932 hci_dev_put(hdev);
4935 /* Detect a move collision. Only send a collision response
4936 * if this side has "lost", otherwise proceed with the move.
4937 * The winner has the larger bd_addr.
4939 if ((__chan_is_moving(chan) ||
4940 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4941 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4942 result = L2CAP_MR_COLLISION;
4943 goto send_move_response;
4946 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4947 l2cap_move_setup(chan);
4948 chan->move_id = req->dest_amp_id;
4949 icid = chan->dcid;
4951 if (req->dest_amp_id == AMP_ID_BREDR) {
4952 /* Moving to BR/EDR */
4953 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4954 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4955 result = L2CAP_MR_PEND;
4956 } else {
4957 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4958 result = L2CAP_MR_SUCCESS;
4960 } else {
4961 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4962 /* Placeholder - uncomment when amp functions are available */
4963 /*amp_accept_physical(chan, req->dest_amp_id);*/
4964 result = L2CAP_MR_PEND;
4967 send_move_response:
4968 l2cap_send_move_chan_rsp(chan, result);
4970 l2cap_chan_unlock(chan);
4972 return 0;
/* Advance the initiator-side move state machine after a success or
 * pending Move Channel Response.
 *
 * If the channel cannot be found, an unconfirmed confirmation is sent
 * for the bare ICID.  Otherwise the move timer is reset (re-armed with
 * the extended timeout for a pending result) and the next step is
 * chosen from the current move state; unexpected states abort the move.
 */
4975 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4977 struct l2cap_chan *chan;
4978 struct hci_chan *hchan = NULL;
/* Returns the channel locked on success. */
4980 chan = l2cap_get_chan_by_scid(conn, icid);
4981 if (!chan) {
4982 l2cap_send_move_chan_cfm_icid(conn, icid);
4983 return;
4986 __clear_chan_timer(chan);
4987 if (result == L2CAP_MR_PEND)
4988 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4990 switch (chan->move_state) {
4991 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4992 /* Move confirm will be sent when logical link
4993 * is complete.
4995 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4996 break;
4997 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4998 if (result == L2CAP_MR_PEND) {
4999 break;
5000 } else if (test_bit(CONN_LOCAL_BUSY,
5001 &chan->conn_state)) {
5002 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5003 } else {
5004 /* Logical link is up or moving to BR/EDR,
5005 * proceed with move
5007 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5008 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5010 break;
5011 case L2CAP_MOVE_WAIT_RSP:
5012 /* Moving to AMP */
5013 if (result == L2CAP_MR_SUCCESS) {
5014 /* Remote is ready, send confirm immediately
5015 * after logical link is ready
5017 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5018 } else {
5019 /* Both logical link and move success
5020 * are required to confirm
5022 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
/* NOTE(review): hchan is always NULL until the placeholder below is
 * implemented, so the unconfirmed-cfm branch is currently taken.
 */
5025 /* Placeholder - get hci_chan for logical link */
5026 if (!hchan) {
5027 /* Logical link not available */
5028 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5029 break;
5032 /* If the logical link is not yet connected, do not
5033 * send confirmation.
5035 if (hchan->state != BT_CONNECTED)
5036 break;
5038 /* Logical link is already ready to go */
5040 chan->hs_hcon = hchan->conn;
5041 chan->hs_hcon->l2cap_data = chan->conn;
5043 if (result == L2CAP_MR_SUCCESS) {
5044 /* Can confirm now */
5045 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5046 } else {
5047 /* Now only need move success
5048 * to confirm
5050 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5053 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5054 break;
5055 default:
5056 /* Any other amp move state means the move failed. */
5057 chan->move_id = chan->local_amp_id;
5058 l2cap_move_done(chan);
5059 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5062 l2cap_chan_unlock(chan);
/* Handle a failed (neither success nor pending) Move Channel Response.
 *
 * On a collision while we are the initiator, the peer wins and we take
 * the responder role; any other failure cancels the move and reverts to
 * the current controller.  In every case a Move Channel Confirmation
 * with "unconfirmed" is sent back.
 *
 * l2cap_get_chan_by_ident() returns the channel locked; it is unlocked
 * before returning.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5094 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5095 struct l2cap_cmd_hdr *cmd,
5096 u16 cmd_len, void *data)
5098 struct l2cap_move_chan_rsp *rsp = data;
5099 u16 icid, result;
5101 if (cmd_len != sizeof(*rsp))
5102 return -EPROTO;
5104 icid = le16_to_cpu(rsp->icid);
5105 result = le16_to_cpu(rsp->result);
5107 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5109 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5110 l2cap_move_continue(conn, icid, result);
5111 else
5112 l2cap_move_fail(conn, cmd->ident, icid, result);
5114 return 0;
/* Handle a Move Channel Confirmation from the peer.
 *
 * If this channel was waiting for the confirmation, commit the
 * controller switch on "confirmed" (dropping the AMP logical link when
 * the destination is BR/EDR) or revert it otherwise, then finish the
 * move.  A confirmation response is sent in every case, as the spec
 * requires one even for an unknown icid.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Channel is returned locked when found */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Back on BR/EDR the AMP logical link is unused */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle a Move Channel Confirmation Response, the final message of the
 * AMP channel-move sequence.  Commits the new controller id and, when
 * the channel moved back to BR/EDR, releases the now-unused AMP logical
 * link.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Channel is returned locked when found; unknown icid is ignored */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5194 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5195 u16 to_multiplier)
5197 u16 max_latency;
5199 if (min > max || min < 6 || max > 3200)
5200 return -EINVAL;
5202 if (to_multiplier < 10 || to_multiplier > 3200)
5203 return -EINVAL;
5205 if (max >= to_multiplier * 8)
5206 return -EINVAL;
5208 max_latency = (to_multiplier * 8 / max) - 1;
5209 if (latency > 499 || latency > max_latency)
5210 return -EINVAL;
5212 return 0;
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when the local side is master.  The requested parameters
 * are validated, an accept/reject response is sent, and on accept the
 * controller is asked to apply the new parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* A slave must not be asked to update parameters this way */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Parameters were sane: hand them to the controller */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
/* Handle an LE Credit Based Connection Response for a connect request
 * we sent earlier (matched by command ident).  On success the channel
 * adopts the peer's dcid/mtu/mps/credits and becomes ready; any other
 * result tears the channel down with ECONNREFUSED.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* Spec minimum for both MTU and MPS is 23 octets */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Only request handlers propagate errors (which make the caller emit a
 * command reject); response handlers have their return values
 * deliberately ignored, and an unknown opcode is rejected with -EINVAL.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back to the peer */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Looks for a listening channel on the requested PSM, checks security
 * and duplicate cids, then creates and initializes a new channel from
 * the listener.  Unless setup is deferred (response sent later by the
 * socket layer), an LE connection response carrying our cid/mtu/mps/
 * credits and the result code is sent back.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for both MTU and MPS is 23 octets */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Adopt the connection's addresses and the peer's parameters.
	 * The peer's scid becomes our dcid.
	 */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	/* Deferred setup: the response is sent later, keep ident */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5508 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5509 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5510 u8 *data)
5512 struct l2cap_le_credits *pkt;
5513 struct l2cap_chan *chan;
5514 u16 cid, credits, max_credits;
5516 if (cmd_len != sizeof(*pkt))
5517 return -EPROTO;
5519 pkt = (struct l2cap_le_credits *) data;
5520 cid = __le16_to_cpu(pkt->cid);
5521 credits = __le16_to_cpu(pkt->credits);
5523 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5525 chan = l2cap_get_chan_by_dcid(conn, cid);
5526 if (!chan)
5527 return -EBADSLT;
5529 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5530 if (credits > max_credits) {
5531 BT_ERR("LE credits overflow");
5532 l2cap_send_disconn_req(chan, ECONNRESET);
5534 /* Return 0 so that we don't trigger an unnecessary
5535 * command reject packet.
5537 return 0;
5540 chan->tx_credits += credits;
5542 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5543 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5544 chan->tx_credits--;
5547 if (chan->tx_credits)
5548 chan->ops->resume(chan);
5550 l2cap_chan_unlock(chan);
5552 return 0;
/* Handle a Command Reject on the LE signaling channel.
 *
 * A reject of one of our pending requests (matched by ident) means the
 * corresponding channel setup failed; tear the channel down with
 * ECONNREFUSED.  Rejects that match nothing are silently ignored.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}
/* Dispatch one LE signaling command to its handler.
 *
 * Request handlers propagate errors so the caller can send a command
 * reject; response handlers have their return values deliberately
 * ignored, and an unknown opcode is rejected with -EINVAL.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
/* Process a frame received on the LE signaling channel.
 *
 * Unlike BR/EDR, an LE signaling frame carries exactly one command, so
 * the declared length must match the remaining payload exactly.  On a
 * handler error a command reject is sent.  The skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* ident 0 is reserved and invalid on the wire */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
/* Process a frame received on the BR/EDR signaling channel.
 *
 * A BR/EDR signaling frame may carry several commands back to back;
 * each is copied into a local header and dispatched in turn.  A
 * truncated command aborts the loop; a handler error triggers a
 * command reject.  The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before parsing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* ident 0 is reserved and invalid on the wire */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
/* Verify and strip the FCS (CRC-16) of a received ERTM/streaming frame.
 *
 * The CRC covers the L2CAP header, which sits hdr_size bytes before
 * skb->data, plus the payload.  skb_trim() removes the FCS from the
 * frame length first; the trimmed bytes are still present in the
 * buffer, so reading them at skb->data + skb->len is safe.
 *
 * Returns 0 on match (or when FCS is not in use), -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
/* Answer a poll (P=1) from the peer with the F-bit set.
 *
 * Sends an RNR when locally busy, otherwise lets pending I-frames
 * carry the F-bit, and falls back to an RR if nothing else carried it
 * (l2cap_send_sframe()/l2cap_ertm_send() clear CONN_SEND_FBIT once the
 * bit goes out).
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just left busy state: restart retransmission timing */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5772 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5773 struct sk_buff **last_frag)
5775 /* skb->len reflects data in skb as well as all fragments
5776 * skb->data_len reflects only data in fragments
5778 if (!skb_has_frag_list(skb))
5779 skb_shinfo(skb)->frag_list = new_frag;
5781 new_frag->next = NULL;
5783 (*last_frag)->next = new_frag;
5784 *last_frag = new_frag;
5786 skb->len += new_frag->len;
5787 skb->data_len += new_frag->len;
5788 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from an ERTM/streaming I-frame
 * and deliver complete SDUs via chan->ops->recv().
 *
 * Returns 0 when the skb was consumed (delivered, or queued as part of
 * a reassembly in progress).  A protocol violation — wrong SAR state,
 * SDU larger than our MTU, or a length mismatch — returns a negative
 * error and frees both the skb and any partially assembled SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A segmented SDU must not still be pending */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two payload bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment carrying the whole SDU is invalid */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by the reassembly buffer */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching the announced length before the END is invalid */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must match the announced SDU length exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partial SDU (kfree_skb(NULL) is
		 * a no-op for the cases that cleared skb above).
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
/* Re-segment queued outgoing data after an AMP channel move changes
 * the usable MTU.  Not implemented yet; unconditionally succeeds.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5879 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5881 u8 event;
5883 if (chan->mode != L2CAP_MODE_ERTM)
5884 return;
5886 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5887 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue of frames that are now in sequence.
 *
 * Once the queue is fully drained the channel returns to the normal
 * RECV state and the progress is acknowledged.  Returns the first
 * reassembly error encountered, or 0.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * asked for.
 *
 * An SREJ for next_tx_seq (never sent) or for a frame past its retry
 * limit disconnects the channel.  With P=1 we must answer with F=1;
 * with F=1 we must avoid retransmitting a frame that an earlier
 * poll-triggered SREJ retransmission (CONN_SREJ_ACT) already covered.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F=1 SREJ matches
			 * the poll-time SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
/* Handle a received REJ S-frame: retransmit everything from reqseq on.
 *
 * A REJ for next_tx_seq (never sent) or for a frame already at its
 * retry limit disconnects the channel.  With F=1 the retransmission is
 * skipped if a poll-triggered REJ retransmission (CONN_REJ_ACT)
 * already covered it.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Classify the txseq of a received I-frame relative to the receive
 * window and any outstanding SREJ state.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications that drive the rx
 * state machine: expected (in order), unexpected (a gap), duplicate,
 * the SREJ-specific variants while in SREJ_SENT state, or invalid
 * (ignorable or fatal, depending on the window size — see the "double
 * poll" discussion below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
/* ERTM receive state machine, normal RECV state.
 *
 * Handles incoming I-frames (delivering in-order data, or switching to
 * SREJ_SENT on a sequence gap) and S-frames (RR/RNR/REJ/SREJ).  Any
 * skb not delivered or queued here is freed before returning; ownership
 * is tracked with skb_in_use.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* reassemble_sdu consumes the skb even on error */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Duplicate data is dropped, but its ack info
			 * is still valid for the tx side.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* No retransmission during a channel move */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
/* ERTM receive state machine, SREJ_SENT state (a sequence gap exists
 * and SREJs are outstanding).
 *
 * Out-of-order frames are queued on srej_q; when a missing frame
 * arrives, l2cap_rx_queued_iframes() drains everything that is now in
 * sequence and returns the channel to RECV once the gap is closed.
 * Any skb not queued here is freed before returning (skb_in_use).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* A requested retransmission arrived in order */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by repeating the latest SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
/* Complete an ERTM channel move: return the receive side to its normal
 * RECV state, switch the connection MTU to the link now carrying the
 * channel (AMP block MTU when a high-speed link is present, otherwise
 * the BR/EDR ACL MTU) and re-segment pending outgoing data for it.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
/* RX handler for the WAIT_P state (used while a channel move is in
 * progress): we only accept a poll (P=1) S-frame here; anything else
 * is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the (possibly shrunk)
	 * tx queue.
	 */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not acceptable in this state. */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Reprocess the S-frame event through the normal RECV handler
	 * (without the skb, which only matters for I-frames).
	 */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
/* RX handler for the WAIT_F state (channel move in progress): only a
 * final (F=1) frame answering our poll is acceptable here.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the MTU of the link that now carries the channel (AMP
	 * block MTU for a high-speed link, otherwise the ACL MTU).
	 */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6472 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6474 /* Make sure reqseq is for a packet that has been sent but not acked */
6475 u16 unacked;
6477 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6478 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6481 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6482 struct sk_buff *skb, u8 event)
6484 int err = 0;
6486 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6487 control, skb, event, chan->rx_state);
6489 if (__valid_reqseq(chan, control->reqseq)) {
6490 switch (chan->rx_state) {
6491 case L2CAP_RX_STATE_RECV:
6492 err = l2cap_rx_state_recv(chan, control, skb, event);
6493 break;
6494 case L2CAP_RX_STATE_SREJ_SENT:
6495 err = l2cap_rx_state_srej_sent(chan, control, skb,
6496 event);
6497 break;
6498 case L2CAP_RX_STATE_WAIT_P:
6499 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6500 break;
6501 case L2CAP_RX_STATE_WAIT_F:
6502 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6503 break;
6504 default:
6505 /* shut it down */
6506 break;
6508 } else {
6509 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6510 control->reqseq, chan->next_tx_seq,
6511 chan->expected_ack_seq);
6512 l2cap_send_disconn_req(chan, ECONNRESET);
6515 return err;
/* Receive path for streaming mode. Frames are never retransmitted in
 * this mode, so an unexpected TxSeq simply discards any partially
 * reassembled SDU and the frame itself; the sequence counters are
 * resynchronized to the received TxSeq regardless.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: drop any SDU in progress and
		 * the frame itself.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
/* Validate and dispatch one ERTM/streaming-mode frame: check the FCS,
 * compute the payload length net of SDU-length and FCS fields, enforce
 * the negotiated MPS, then route I-frames to the rx state machine (or
 * the streaming path) and S-frames to the matching RR/REJ/RNR/SREJ
 * event. Always consumes @skb and returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The first fragment of an SDU carries a 2-byte SDU length that
	 * does not count toward the payload.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit supervisory function to an rx event. */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload at all. */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Return receive credits to the remote end of an LE flow-control
 * channel. Credits are replenished (back up to le_max_credits) only
 * after the remaining amount falls below half the initial allotment,
 * so we do not send an LE_CREDITS packet for every received PDU.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6670 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6672 int err;
6674 if (!chan->rx_credits) {
6675 BT_ERR("No credits to receive LE L2CAP data");
6676 l2cap_send_disconn_req(chan, ECONNRESET);
6677 return -ENOBUFS;
6680 if (chan->imtu < skb->len) {
6681 BT_ERR("Too big LE L2CAP PDU");
6682 return -ENOBUFS;
6685 chan->rx_credits--;
6686 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6688 l2cap_chan_le_send_credits(chan);
6690 err = 0;
6692 if (!chan->sdu) {
6693 u16 sdu_len;
6695 sdu_len = get_unaligned_le16(skb->data);
6696 skb_pull(skb, L2CAP_SDULEN_SIZE);
6698 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6699 sdu_len, skb->len, chan->imtu);
6701 if (sdu_len > chan->imtu) {
6702 BT_ERR("Too big LE L2CAP SDU length received");
6703 err = -EMSGSIZE;
6704 goto failed;
6707 if (skb->len > sdu_len) {
6708 BT_ERR("Too much LE L2CAP data received");
6709 err = -EINVAL;
6710 goto failed;
6713 if (skb->len == sdu_len)
6714 return chan->ops->recv(chan, skb);
6716 chan->sdu = skb;
6717 chan->sdu_len = sdu_len;
6718 chan->sdu_last_frag = skb;
6720 return 0;
6723 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6724 chan->sdu->len, skb->len, chan->sdu_len);
6726 if (chan->sdu->len + skb->len > chan->sdu_len) {
6727 BT_ERR("Too much LE L2CAP data received");
6728 err = -EINVAL;
6729 goto failed;
6732 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6733 skb = NULL;
6735 if (chan->sdu->len == chan->sdu_len) {
6736 err = chan->ops->recv(chan, chan->sdu);
6737 if (!err) {
6738 chan->sdu = NULL;
6739 chan->sdu_last_frag = NULL;
6740 chan->sdu_len = 0;
6744 failed:
6745 if (err) {
6746 kfree_skb(skb);
6747 kfree_skb(chan->sdu);
6748 chan->sdu = NULL;
6749 chan->sdu_last_frag = NULL;
6750 chan->sdu_len = 0;
6753 /* We can't return an error here since we took care of the skb
6754 * freeing internally. An error return would cause the caller to
6755 * do a double-free of the skb.
6757 return 0;
/* Deliver an incoming frame to the connection-oriented channel matching
 * its source CID, dispatching by channel mode. The A2MP fixed channel
 * is created on demand when first addressed. Consumes @skb on all
 * paths (directly or via the mode handlers).
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* Returns the channel locked on success. */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* A negative return means the skb is still ours to free. */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* A zero return means ->recv() consumed the skb. */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb. */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
/* Deliver a connectionless (G-frame) packet to a raw channel listening
 * on the given PSM. The remote address and PSM are stashed in the skb
 * control block so the socket layer can report them via msg_name.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data only exists on BR/EDR ACL links. */
	if (hcon->type != ACL_LINK)
		goto drop;

	/* NOTE(review): if l2cap_global_chan_by_psm() returns a held
	 * channel reference, it is never dropped on any path below -
	 * verify whether an l2cap_chan_put() is needed here.
	 */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	/* Zero return means ->recv() consumed the skb. */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
/* Deliver an incoming PDU on the LE ATT fixed channel to the connected
 * ATT channel for this address pair, dropping traffic from blacklisted
 * remote devices.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* The ATT fixed channel only exists on LE links. */
	if (hcon->type != LE_LINK)
		goto drop;

	/* NOTE(review): if l2cap_global_chan_by_scid() returns a held
	 * channel reference, it is never dropped on any path below -
	 * verify whether an l2cap_chan_put() is needed here.
	 */
	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Zero return means ->recv() consumed the skb. */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
/* Demultiplex one complete L2CAP frame to the handler for its CID.
 * Frames arriving before the HCI link is fully connected are parked on
 * conn->pending_rx and replayed later by process_pending_rx().
 * Consumes @skb on all paths.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header's length field must match the actual payload. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		/* An SMP failure tears down the whole connection. */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	case L2CAP_FC_6LOWPAN:
		bt_6lowpan_recv(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6947 static void process_pending_rx(struct work_struct *work)
6949 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6950 pending_rx_work);
6951 struct sk_buff *skb;
6953 BT_DBG("");
6955 while ((skb = skb_dequeue(&conn->pending_rx)))
6956 l2cap_recv_frame(conn, skb);
/* Find or create the L2CAP connection object for an HCI connection.
 * Returns the existing conn when one is already attached; otherwise
 * allocates a new one together with its HCI channel and initializes
 * all per-connection state. Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Hold the hci_conn for as long as the l2cap_conn exists. */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the controller's LE MTU when it advertises one;
	 * everything else falls back to the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links use the SMP security timer; BR/EDR uses the info
	 * request timer instead.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7022 static bool is_valid_psm(u16 psm, u8 dst_type) {
7023 if (!psm)
7024 return false;
7026 if (bdaddr_type_is_le(dst_type))
7027 return (psm <= 0x00ff);
7029 /* PSM must be odd and lsb of upper byte must be 0 */
7030 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP channel connection to @dst.
 *
 * @chan:     channel to connect (validated against its chan_type/mode)
 * @psm:      destination PSM (connection-oriented channels)
 * @cid:      destination CID (fixed channels)
 * @dst:      remote address
 * @dst_type: remote address type (BDADDR_* constant)
 *
 * Creates or reuses the underlying HCI link (LE or ACL depending on
 * the address type), attaches the channel to the l2cap_conn and starts
 * the L2CAP connect procedure. Returns 0 on success (including when a
 * connect is already in progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* A raw channel needs neither a valid PSM nor a CID; everything
	 * else needs at least one of them.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      auth_type);
	} else {
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed destination CID must not be in use already. */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* l2cap_chan_add() takes conn->chan_lock, so the channel lock
	 * must be released around it to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start channel setup right away. */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7188 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: incoming ACL connection request from @bdaddr.
 * Scan the global channel list for listeners and report whether to
 * accept the link and whether to request role switch. Listeners bound
 * to this adapter's exact address take precedence over wildcard
 * (BDADDR_ANY) listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Exact-address listener */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
7219 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7221 struct l2cap_conn *conn;
7223 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7225 if (!status) {
7226 conn = l2cap_conn_add(hcon);
7227 if (conn)
7228 l2cap_conn_ready(conn);
7229 } else {
7230 l2cap_conn_del(hcon, bt_to_errno(status));
7234 int l2cap_disconn_ind(struct hci_conn *hcon)
7236 struct l2cap_conn *conn = hcon->l2cap_data;
7238 BT_DBG("hcon %p", hcon);
7240 if (!conn)
7241 return HCI_ERROR_REMOTE_USER_TERM;
7242 return conn->disc_reason;
/* HCI callback: the link is gone. Tear down 6LoWPAN state and then all
 * L2CAP channels on this connection.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	bt_6lowpan_del_conn(hcon->l2cap_data);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7254 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7256 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7257 return;
7259 if (encrypt == 0x00) {
7260 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7261 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7262 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7263 chan->sec_level == BT_SECURITY_FIPS)
7264 l2cap_chan_close(chan, ECONNREFUSED);
7265 } else {
7266 if (chan->sec_level == BT_SECURITY_MEDIUM)
7267 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption status changed on the link.
 * Walk every channel on the connection and advance (or abort) its
 * setup according to the new security state.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* Successful LE encryption triggers SMP key distribution. */
		if (!status && encrypt)
			smp_distribute_keys(conn);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP fixed channel is unaffected by link security. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The ATT fixed channel becomes ready once encrypted. */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume (or react to the
		 * encryption change).
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security finished before our connect request. */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection was waiting for security;
			 * answer the pending Connect Request now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately start configuration. */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Entry point for ACL data from the HCI layer: reassemble fragmented
 * L2CAP frames (start fragments carry the Basic L2CAP header with the
 * total length) and hand complete frames to l2cap_recv_frame().
 * Always returns 0; malformed sequences mark the connection unreliable
 * and the offending data is dropped.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start fragment while one is pending means the
		 * previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Fragment data was copied into rx_skb above, so the input skb
	 * is always freed here (intentional fall-through from the
	 * switch on the non-complete-frame paths).
	 */
drop:
	kfree_skb(skb);
	return 0;
}
/* debugfs: print one line per registered channel (addresses, state,
 * PSM, CIDs, MTUs, security level and mode).
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, &c->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
/* debugfs open: wire up the single-show seq_file handler. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, create the debugfs
 * entries (channel dump plus the LE credit/MPS tunables) and bring up
 * the 6LoWPAN support.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	/* debugfs is optional; everything else still works without it. */
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	bt_6lowpan_init();

	return 0;
}
/* Module exit: undo l2cap_init() in reverse order. */
void l2cap_exit(void)
{
	bt_6lowpan_cleanup();
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
/* Allow disabling ERTM at load time (and toggling via sysfs). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");