Merge branch 'x86/rdrand' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
[wandboard.git] / net / bluetooth / l2cap_core.c
blobfaf0b11ac1d3610805e6d049f4a2e2221c35053a
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
60 bool disable_ertm;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 struct l2cap_chan *c, *r = NULL;
84 rcu_read_lock();
86 list_for_each_entry_rcu(c, &conn->chan_l, list) {
87 if (c->dcid == cid) {
88 r = c;
89 break;
93 rcu_read_unlock();
94 return r;
97 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
99 struct l2cap_chan *c, *r = NULL;
101 rcu_read_lock();
103 list_for_each_entry_rcu(c, &conn->chan_l, list) {
104 if (c->scid == cid) {
105 r = c;
106 break;
110 rcu_read_unlock();
111 return r;
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 lock_sock(c->sk);
123 return c;
126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 struct l2cap_chan *c, *r = NULL;
130 rcu_read_lock();
132 list_for_each_entry_rcu(c, &conn->chan_l, list) {
133 if (c->ident == ident) {
134 r = c;
135 break;
139 rcu_read_unlock();
140 return r;
143 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
145 struct l2cap_chan *c;
147 c = __l2cap_get_chan_by_ident(conn, ident);
148 if (c)
149 lock_sock(c->sk);
150 return c;
153 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
155 struct l2cap_chan *c;
157 list_for_each_entry(c, &chan_list, global_l) {
158 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
159 return c;
161 return NULL;
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 int err;
168 write_lock(&chan_list_lock);
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
171 err = -EADDRINUSE;
172 goto done;
175 if (psm) {
176 chan->psm = psm;
177 chan->sport = psm;
178 err = 0;
179 } else {
180 u16 p;
182 err = -EINVAL;
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
187 err = 0;
188 break;
192 done:
193 write_unlock(&chan_list_lock);
194 return err;
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 write_lock(&chan_list_lock);
201 chan->scid = scid;
203 write_unlock(&chan_list_lock);
205 return 0;
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 u16 cid = L2CAP_CID_DYN_START;
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
214 return cid;
217 return 0;
220 static char *state_to_string(int state)
222 switch(state) {
223 case BT_CONNECTED:
224 return "BT_CONNECTED";
225 case BT_OPEN:
226 return "BT_OPEN";
227 case BT_BOUND:
228 return "BT_BOUND";
229 case BT_LISTEN:
230 return "BT_LISTEN";
231 case BT_CONNECT:
232 return "BT_CONNECT";
233 case BT_CONNECT2:
234 return "BT_CONNECT2";
235 case BT_CONFIG:
236 return "BT_CONFIG";
237 case BT_DISCONN:
238 return "BT_DISCONN";
239 case BT_CLOSED:
240 return "BT_CLOSED";
243 return "invalid state";
246 static void l2cap_state_change(struct l2cap_chan *chan, int state)
248 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
249 state_to_string(state));
251 chan->state = state;
252 chan->ops->state_change(chan->data, state);
255 static void l2cap_chan_timeout(struct work_struct *work)
257 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
258 chan_timer.work);
259 struct sock *sk = chan->sk;
260 int reason;
262 BT_DBG("chan %p state %d", chan, chan->state);
264 lock_sock(sk);
266 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
267 reason = ECONNREFUSED;
268 else if (chan->state == BT_CONNECT &&
269 chan->sec_level != BT_SECURITY_SDP)
270 reason = ECONNREFUSED;
271 else
272 reason = ETIMEDOUT;
274 l2cap_chan_close(chan, reason);
276 release_sock(sk);
278 chan->ops->close(chan->data);
279 l2cap_chan_put(chan);
282 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
284 struct l2cap_chan *chan;
286 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 if (!chan)
288 return NULL;
290 chan->sk = sk;
292 write_lock(&chan_list_lock);
293 list_add(&chan->global_l, &chan_list);
294 write_unlock(&chan_list_lock);
296 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
298 chan->state = BT_OPEN;
300 atomic_set(&chan->refcnt, 1);
302 BT_DBG("sk %p chan %p", sk, chan);
304 return chan;
307 void l2cap_chan_destroy(struct l2cap_chan *chan)
309 write_lock(&chan_list_lock);
310 list_del(&chan->global_l);
311 write_unlock(&chan_list_lock);
313 l2cap_chan_put(chan);
316 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
318 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
319 chan->psm, chan->dcid);
321 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
323 chan->conn = conn;
325 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
326 if (conn->hcon->type == LE_LINK) {
327 /* LE connection */
328 chan->omtu = L2CAP_LE_DEFAULT_MTU;
329 chan->scid = L2CAP_CID_LE_DATA;
330 chan->dcid = L2CAP_CID_LE_DATA;
331 } else {
332 /* Alloc CID for connection-oriented socket */
333 chan->scid = l2cap_alloc_cid(conn);
334 chan->omtu = L2CAP_DEFAULT_MTU;
336 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
337 /* Connectionless socket */
338 chan->scid = L2CAP_CID_CONN_LESS;
339 chan->dcid = L2CAP_CID_CONN_LESS;
340 chan->omtu = L2CAP_DEFAULT_MTU;
341 } else {
342 /* Raw socket can send/recv signalling messages only */
343 chan->scid = L2CAP_CID_SIGNALING;
344 chan->dcid = L2CAP_CID_SIGNALING;
345 chan->omtu = L2CAP_DEFAULT_MTU;
348 chan->local_id = L2CAP_BESTEFFORT_ID;
349 chan->local_stype = L2CAP_SERV_BESTEFFORT;
350 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
351 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
352 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
353 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
355 l2cap_chan_hold(chan);
357 list_add_rcu(&chan->list, &conn->chan_l);
360 /* Delete channel.
361 * Must be called on the locked socket. */
362 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
364 struct sock *sk = chan->sk;
365 struct l2cap_conn *conn = chan->conn;
366 struct sock *parent = bt_sk(sk)->parent;
368 __clear_chan_timer(chan);
370 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
372 if (conn) {
373 /* Delete from channel list */
374 list_del_rcu(&chan->list);
375 synchronize_rcu();
377 l2cap_chan_put(chan);
379 chan->conn = NULL;
380 hci_conn_put(conn->hcon);
383 l2cap_state_change(chan, BT_CLOSED);
384 sock_set_flag(sk, SOCK_ZAPPED);
386 if (err)
387 sk->sk_err = err;
389 if (parent) {
390 bt_accept_unlink(sk);
391 parent->sk_data_ready(parent, 0);
392 } else
393 sk->sk_state_change(sk);
395 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
396 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 return;
399 skb_queue_purge(&chan->tx_q);
401 if (chan->mode == L2CAP_MODE_ERTM) {
402 struct srej_list *l, *tmp;
404 __clear_retrans_timer(chan);
405 __clear_monitor_timer(chan);
406 __clear_ack_timer(chan);
408 skb_queue_purge(&chan->srej_q);
410 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
411 list_del(&l->list);
412 kfree(l);
417 static void l2cap_chan_cleanup_listen(struct sock *parent)
419 struct sock *sk;
421 BT_DBG("parent %p", parent);
423 /* Close not yet accepted channels */
424 while ((sk = bt_accept_dequeue(parent, NULL))) {
425 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
426 __clear_chan_timer(chan);
427 lock_sock(sk);
428 l2cap_chan_close(chan, ECONNRESET);
429 release_sock(sk);
430 chan->ops->close(chan->data);
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
439 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
441 switch (chan->state) {
442 case BT_LISTEN:
443 l2cap_chan_cleanup_listen(sk);
445 l2cap_state_change(chan, BT_CLOSED);
446 sock_set_flag(sk, SOCK_ZAPPED);
447 break;
449 case BT_CONNECTED:
450 case BT_CONFIG:
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) {
453 __clear_chan_timer(chan);
454 __set_chan_timer(chan, sk->sk_sndtimeo);
455 l2cap_send_disconn_req(conn, chan, reason);
456 } else
457 l2cap_chan_del(chan, reason);
458 break;
460 case BT_CONNECT2:
461 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
462 conn->hcon->type == ACL_LINK) {
463 struct l2cap_conn_rsp rsp;
464 __u16 result;
466 if (bt_sk(sk)->defer_setup)
467 result = L2CAP_CR_SEC_BLOCK;
468 else
469 result = L2CAP_CR_BAD_PSM;
470 l2cap_state_change(chan, BT_DISCONN);
472 rsp.scid = cpu_to_le16(chan->dcid);
473 rsp.dcid = cpu_to_le16(chan->scid);
474 rsp.result = cpu_to_le16(result);
475 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
476 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
477 sizeof(rsp), &rsp);
480 l2cap_chan_del(chan, reason);
481 break;
483 case BT_CONNECT:
484 case BT_DISCONN:
485 l2cap_chan_del(chan, reason);
486 break;
488 default:
489 sock_set_flag(sk, SOCK_ZAPPED);
490 break;
494 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
496 if (chan->chan_type == L2CAP_CHAN_RAW) {
497 switch (chan->sec_level) {
498 case BT_SECURITY_HIGH:
499 return HCI_AT_DEDICATED_BONDING_MITM;
500 case BT_SECURITY_MEDIUM:
501 return HCI_AT_DEDICATED_BONDING;
502 default:
503 return HCI_AT_NO_BONDING;
505 } else if (chan->psm == cpu_to_le16(0x0001)) {
506 if (chan->sec_level == BT_SECURITY_LOW)
507 chan->sec_level = BT_SECURITY_SDP;
509 if (chan->sec_level == BT_SECURITY_HIGH)
510 return HCI_AT_NO_BONDING_MITM;
511 else
512 return HCI_AT_NO_BONDING;
513 } else {
514 switch (chan->sec_level) {
515 case BT_SECURITY_HIGH:
516 return HCI_AT_GENERAL_BONDING_MITM;
517 case BT_SECURITY_MEDIUM:
518 return HCI_AT_GENERAL_BONDING;
519 default:
520 return HCI_AT_NO_BONDING;
525 /* Service level security */
526 int l2cap_chan_check_security(struct l2cap_chan *chan)
528 struct l2cap_conn *conn = chan->conn;
529 __u8 auth_type;
531 auth_type = l2cap_get_auth_type(chan);
533 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
536 static u8 l2cap_get_ident(struct l2cap_conn *conn)
538 u8 id;
540 /* Get next available identificator.
541 * 1 - 128 are used by kernel.
542 * 129 - 199 are reserved.
543 * 200 - 254 are used by utilities like l2ping, etc.
546 spin_lock(&conn->lock);
548 if (++conn->tx_ident > 128)
549 conn->tx_ident = 1;
551 id = conn->tx_ident;
553 spin_unlock(&conn->lock);
555 return id;
558 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
560 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
561 u8 flags;
563 BT_DBG("code 0x%2.2x", code);
565 if (!skb)
566 return;
568 if (lmp_no_flush_capable(conn->hcon->hdev))
569 flags = ACL_START_NO_FLUSH;
570 else
571 flags = ACL_START;
573 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
574 skb->priority = HCI_PRIO_MAX;
576 hci_send_acl(conn->hchan, skb, flags);
579 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
581 struct hci_conn *hcon = chan->conn->hcon;
582 u16 flags;
584 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
585 skb->priority);
587 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
588 lmp_no_flush_capable(hcon->hdev))
589 flags = ACL_START_NO_FLUSH;
590 else
591 flags = ACL_START;
593 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
594 hci_send_acl(chan->conn->hchan, skb, flags);
597 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
599 struct sk_buff *skb;
600 struct l2cap_hdr *lh;
601 struct l2cap_conn *conn = chan->conn;
602 int count, hlen;
604 if (chan->state != BT_CONNECTED)
605 return;
607 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
608 hlen = L2CAP_EXT_HDR_SIZE;
609 else
610 hlen = L2CAP_ENH_HDR_SIZE;
612 if (chan->fcs == L2CAP_FCS_CRC16)
613 hlen += L2CAP_FCS_SIZE;
615 BT_DBG("chan %p, control 0x%8.8x", chan, control);
617 count = min_t(unsigned int, conn->mtu, hlen);
619 control |= __set_sframe(chan);
621 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
622 control |= __set_ctrl_final(chan);
624 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
625 control |= __set_ctrl_poll(chan);
627 skb = bt_skb_alloc(count, GFP_ATOMIC);
628 if (!skb)
629 return;
631 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
632 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
633 lh->cid = cpu_to_le16(chan->dcid);
635 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
637 if (chan->fcs == L2CAP_FCS_CRC16) {
638 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
639 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
642 skb->priority = HCI_PRIO_MAX;
643 l2cap_do_send(chan, skb);
646 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
648 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
649 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
650 set_bit(CONN_RNR_SENT, &chan->conn_state);
651 } else
652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
654 control |= __set_reqseq(chan, chan->buffer_seq);
656 l2cap_send_sframe(chan, control);
659 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
661 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
664 static void l2cap_do_start(struct l2cap_chan *chan)
666 struct l2cap_conn *conn = chan->conn;
668 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
669 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
670 return;
672 if (l2cap_chan_check_security(chan) &&
673 __l2cap_no_conn_pending(chan)) {
674 struct l2cap_conn_req req;
675 req.scid = cpu_to_le16(chan->scid);
676 req.psm = chan->psm;
678 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
682 sizeof(req), &req);
684 } else {
685 struct l2cap_info_req req;
686 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
689 conn->info_ident = l2cap_get_ident(conn);
691 schedule_delayed_work(&conn->info_timer,
692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
694 l2cap_send_cmd(conn, conn->info_ident,
695 L2CAP_INFO_REQ, sizeof(req), &req);
699 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
701 u32 local_feat_mask = l2cap_feat_mask;
702 if (!disable_ertm)
703 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
705 switch (mode) {
706 case L2CAP_MODE_ERTM:
707 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
708 case L2CAP_MODE_STREAMING:
709 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
710 default:
711 return 0x00;
715 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
717 struct sock *sk;
718 struct l2cap_disconn_req req;
720 if (!conn)
721 return;
723 sk = chan->sk;
725 if (chan->mode == L2CAP_MODE_ERTM) {
726 __clear_retrans_timer(chan);
727 __clear_monitor_timer(chan);
728 __clear_ack_timer(chan);
731 req.dcid = cpu_to_le16(chan->dcid);
732 req.scid = cpu_to_le16(chan->scid);
733 l2cap_send_cmd(conn, l2cap_get_ident(conn),
734 L2CAP_DISCONN_REQ, sizeof(req), &req);
736 l2cap_state_change(chan, BT_DISCONN);
737 sk->sk_err = err;
740 /* ---- L2CAP connections ---- */
741 static void l2cap_conn_start(struct l2cap_conn *conn)
743 struct l2cap_chan *chan;
745 BT_DBG("conn %p", conn);
747 rcu_read_lock();
749 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
750 struct sock *sk = chan->sk;
752 bh_lock_sock(sk);
754 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
755 bh_unlock_sock(sk);
756 continue;
759 if (chan->state == BT_CONNECT) {
760 struct l2cap_conn_req req;
762 if (!l2cap_chan_check_security(chan) ||
763 !__l2cap_no_conn_pending(chan)) {
764 bh_unlock_sock(sk);
765 continue;
768 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
769 && test_bit(CONF_STATE2_DEVICE,
770 &chan->conf_state)) {
771 /* l2cap_chan_close() calls list_del(chan)
772 * so release the lock */
773 l2cap_chan_close(chan, ECONNRESET);
774 bh_unlock_sock(sk);
775 continue;
778 req.scid = cpu_to_le16(chan->scid);
779 req.psm = chan->psm;
781 chan->ident = l2cap_get_ident(conn);
782 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
785 sizeof(req), &req);
787 } else if (chan->state == BT_CONNECT2) {
788 struct l2cap_conn_rsp rsp;
789 char buf[128];
790 rsp.scid = cpu_to_le16(chan->dcid);
791 rsp.dcid = cpu_to_le16(chan->scid);
793 if (l2cap_chan_check_security(chan)) {
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
798 if (parent)
799 parent->sk_data_ready(parent, 0);
801 } else {
802 l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
806 } else {
807 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
811 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
812 sizeof(rsp), &rsp);
814 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
815 rsp.result != L2CAP_CR_SUCCESS) {
816 bh_unlock_sock(sk);
817 continue;
820 set_bit(CONF_REQ_SENT, &chan->conf_state);
821 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
822 l2cap_build_conf_req(chan, buf), buf);
823 chan->num_conf_req++;
826 bh_unlock_sock(sk);
829 rcu_read_unlock();
832 /* Find socket with cid and source bdaddr.
833 * Returns closest match, locked.
835 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
837 struct l2cap_chan *c, *c1 = NULL;
839 read_lock(&chan_list_lock);
841 list_for_each_entry(c, &chan_list, global_l) {
842 struct sock *sk = c->sk;
844 if (state && c->state != state)
845 continue;
847 if (c->scid == cid) {
848 /* Exact match. */
849 if (!bacmp(&bt_sk(sk)->src, src)) {
850 read_unlock(&chan_list_lock);
851 return c;
854 /* Closest match */
855 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
856 c1 = c;
860 read_unlock(&chan_list_lock);
862 return c1;
865 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
867 struct sock *parent, *sk;
868 struct l2cap_chan *chan, *pchan;
870 BT_DBG("");
872 /* Check if we have socket listening on cid */
873 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
874 conn->src);
875 if (!pchan)
876 return;
878 parent = pchan->sk;
880 lock_sock(parent);
882 /* Check for backlog size */
883 if (sk_acceptq_is_full(parent)) {
884 BT_DBG("backlog full %d", parent->sk_ack_backlog);
885 goto clean;
888 chan = pchan->ops->new_connection(pchan->data);
889 if (!chan)
890 goto clean;
892 sk = chan->sk;
894 hci_conn_hold(conn->hcon);
896 bacpy(&bt_sk(sk)->src, conn->src);
897 bacpy(&bt_sk(sk)->dst, conn->dst);
899 bt_accept_enqueue(parent, sk);
901 l2cap_chan_add(conn, chan);
903 __set_chan_timer(chan, sk->sk_sndtimeo);
905 l2cap_state_change(chan, BT_CONNECTED);
906 parent->sk_data_ready(parent, 0);
908 clean:
909 release_sock(parent);
912 static void l2cap_chan_ready(struct sock *sk)
914 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
915 struct sock *parent = bt_sk(sk)->parent;
917 BT_DBG("sk %p, parent %p", sk, parent);
919 chan->conf_state = 0;
920 __clear_chan_timer(chan);
922 l2cap_state_change(chan, BT_CONNECTED);
923 sk->sk_state_change(sk);
925 if (parent)
926 parent->sk_data_ready(parent, 0);
929 static void l2cap_conn_ready(struct l2cap_conn *conn)
931 struct l2cap_chan *chan;
933 BT_DBG("conn %p", conn);
935 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
936 l2cap_le_conn_ready(conn);
938 if (conn->hcon->out && conn->hcon->type == LE_LINK)
939 smp_conn_security(conn, conn->hcon->pending_sec_level);
941 rcu_read_lock();
943 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
944 struct sock *sk = chan->sk;
946 bh_lock_sock(sk);
948 if (conn->hcon->type == LE_LINK) {
949 if (smp_conn_security(conn, chan->sec_level))
950 l2cap_chan_ready(sk);
952 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
953 __clear_chan_timer(chan);
954 l2cap_state_change(chan, BT_CONNECTED);
955 sk->sk_state_change(sk);
957 } else if (chan->state == BT_CONNECT)
958 l2cap_do_start(chan);
960 bh_unlock_sock(sk);
963 rcu_read_unlock();
966 /* Notify sockets that we cannot guaranty reliability anymore */
967 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
969 struct l2cap_chan *chan;
971 BT_DBG("conn %p", conn);
973 rcu_read_lock();
975 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
976 struct sock *sk = chan->sk;
978 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
979 sk->sk_err = err;
982 rcu_read_unlock();
985 static void l2cap_info_timeout(struct work_struct *work)
987 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
988 info_timer.work);
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
993 l2cap_conn_start(conn);
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1000 struct sock *sk;
1002 if (!conn)
1003 return;
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1007 kfree_skb(conn->rx_skb);
1009 /* Kill channels */
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 sk = chan->sk;
1012 lock_sock(sk);
1013 l2cap_chan_del(chan, err);
1014 release_sock(sk);
1015 chan->ops->close(chan->data);
1018 hci_chan_del(conn->hchan);
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 __cancel_delayed_work(&conn->info_timer);
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 __cancel_delayed_work(&conn->security_timer);
1025 smp_chan_destroy(conn);
1028 hcon->l2cap_data = NULL;
1029 kfree(conn);
1032 static void security_timeout(struct work_struct *work)
1034 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1035 security_timer.work);
1037 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1040 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1042 struct l2cap_conn *conn = hcon->l2cap_data;
1043 struct hci_chan *hchan;
1045 if (conn || status)
1046 return conn;
1048 hchan = hci_chan_create(hcon);
1049 if (!hchan)
1050 return NULL;
1052 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1053 if (!conn) {
1054 hci_chan_del(hchan);
1055 return NULL;
1058 hcon->l2cap_data = conn;
1059 conn->hcon = hcon;
1060 conn->hchan = hchan;
1062 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1064 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1065 conn->mtu = hcon->hdev->le_mtu;
1066 else
1067 conn->mtu = hcon->hdev->acl_mtu;
1069 conn->src = &hcon->hdev->bdaddr;
1070 conn->dst = &hcon->dst;
1072 conn->feat_mask = 0;
1074 spin_lock_init(&conn->lock);
1076 INIT_LIST_HEAD(&conn->chan_l);
1078 if (hcon->type == LE_LINK)
1079 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1080 else
1081 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1083 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1085 return conn;
1088 /* ---- Socket interface ---- */
1090 /* Find socket with psm and source bdaddr.
1091 * Returns closest match.
1093 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1095 struct l2cap_chan *c, *c1 = NULL;
1097 read_lock(&chan_list_lock);
1099 list_for_each_entry(c, &chan_list, global_l) {
1100 struct sock *sk = c->sk;
1102 if (state && c->state != state)
1103 continue;
1105 if (c->psm == psm) {
1106 /* Exact match. */
1107 if (!bacmp(&bt_sk(sk)->src, src)) {
1108 read_unlock(&chan_list_lock);
1109 return c;
1112 /* Closest match */
1113 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1114 c1 = c;
1118 read_unlock(&chan_list_lock);
1120 return c1;
1123 inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1125 struct sock *sk = chan->sk;
1126 bdaddr_t *src = &bt_sk(sk)->src;
1127 struct l2cap_conn *conn;
1128 struct hci_conn *hcon;
1129 struct hci_dev *hdev;
1130 __u8 auth_type;
1131 int err;
1133 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1134 chan->psm);
1136 hdev = hci_get_route(dst, src);
1137 if (!hdev)
1138 return -EHOSTUNREACH;
1140 hci_dev_lock(hdev);
1142 lock_sock(sk);
1144 /* PSM must be odd and lsb of upper byte must be 0 */
1145 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1146 chan->chan_type != L2CAP_CHAN_RAW) {
1147 err = -EINVAL;
1148 goto done;
1151 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1152 err = -EINVAL;
1153 goto done;
1156 switch (chan->mode) {
1157 case L2CAP_MODE_BASIC:
1158 break;
1159 case L2CAP_MODE_ERTM:
1160 case L2CAP_MODE_STREAMING:
1161 if (!disable_ertm)
1162 break;
1163 /* fall through */
1164 default:
1165 err = -ENOTSUPP;
1166 goto done;
1169 switch (sk->sk_state) {
1170 case BT_CONNECT:
1171 case BT_CONNECT2:
1172 case BT_CONFIG:
1173 /* Already connecting */
1174 err = 0;
1175 goto done;
1177 case BT_CONNECTED:
1178 /* Already connected */
1179 err = -EISCONN;
1180 goto done;
1182 case BT_OPEN:
1183 case BT_BOUND:
1184 /* Can connect */
1185 break;
1187 default:
1188 err = -EBADFD;
1189 goto done;
1192 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, dst);
1194 chan->psm = psm;
1195 chan->dcid = cid;
1197 auth_type = l2cap_get_auth_type(chan);
1199 if (chan->dcid == L2CAP_CID_LE_DATA)
1200 hcon = hci_connect(hdev, LE_LINK, dst,
1201 chan->sec_level, auth_type);
1202 else
1203 hcon = hci_connect(hdev, ACL_LINK, dst,
1204 chan->sec_level, auth_type);
1206 if (IS_ERR(hcon)) {
1207 err = PTR_ERR(hcon);
1208 goto done;
1211 conn = l2cap_conn_add(hcon, 0);
1212 if (!conn) {
1213 hci_conn_put(hcon);
1214 err = -ENOMEM;
1215 goto done;
1218 /* Update source addr of the socket */
1219 bacpy(src, conn->src);
1221 l2cap_chan_add(conn, chan);
1223 l2cap_state_change(chan, BT_CONNECT);
1224 __set_chan_timer(chan, sk->sk_sndtimeo);
1226 if (hcon->state == BT_CONNECTED) {
1227 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1228 __clear_chan_timer(chan);
1229 if (l2cap_chan_check_security(chan))
1230 l2cap_state_change(chan, BT_CONNECTED);
1231 } else
1232 l2cap_do_start(chan);
1235 err = 0;
1237 done:
1238 hci_dev_unlock(hdev);
1239 hci_dev_put(hdev);
1240 return err;
1243 int __l2cap_wait_ack(struct sock *sk)
1245 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1246 DECLARE_WAITQUEUE(wait, current);
1247 int err = 0;
1248 int timeo = HZ/5;
1250 add_wait_queue(sk_sleep(sk), &wait);
1251 set_current_state(TASK_INTERRUPTIBLE);
1252 while (chan->unacked_frames > 0 && chan->conn) {
1253 if (!timeo)
1254 timeo = HZ/5;
1256 if (signal_pending(current)) {
1257 err = sock_intr_errno(timeo);
1258 break;
1261 release_sock(sk);
1262 timeo = schedule_timeout(timeo);
1263 lock_sock(sk);
1264 set_current_state(TASK_INTERRUPTIBLE);
1266 err = sock_error(sk);
1267 if (err)
1268 break;
1270 set_current_state(TASK_RUNNING);
1271 remove_wait_queue(sk_sleep(sk), &wait);
1272 return err;
1275 static void l2cap_monitor_timeout(struct work_struct *work)
1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1278 monitor_timer.work);
1279 struct sock *sk = chan->sk;
1281 BT_DBG("chan %p", chan);
1283 lock_sock(sk);
1284 if (chan->retry_count >= chan->remote_max_tx) {
1285 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1286 release_sock(sk);
1287 return;
1290 chan->retry_count++;
1291 __set_monitor_timer(chan);
1293 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1294 release_sock(sk);
1297 static void l2cap_retrans_timeout(struct work_struct *work)
1299 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1300 retrans_timer.work);
1301 struct sock *sk = chan->sk;
1303 BT_DBG("chan %p", chan);
1305 lock_sock(sk);
1306 chan->retry_count = 1;
1307 __set_monitor_timer(chan);
1309 set_bit(CONN_WAIT_F, &chan->conn_state);
1311 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1312 release_sock(sk);
1315 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1317 struct sk_buff *skb;
1319 while ((skb = skb_peek(&chan->tx_q)) &&
1320 chan->unacked_frames) {
1321 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1322 break;
1324 skb = skb_dequeue(&chan->tx_q);
1325 kfree_skb(skb);
1327 chan->unacked_frames--;
1330 if (!chan->unacked_frames)
1331 __clear_retrans_timer(chan);
1334 static void l2cap_streaming_send(struct l2cap_chan *chan)
1336 struct sk_buff *skb;
1337 u32 control;
1338 u16 fcs;
1340 while ((skb = skb_dequeue(&chan->tx_q))) {
1341 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1342 control |= __set_txseq(chan, chan->next_tx_seq);
1343 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1345 if (chan->fcs == L2CAP_FCS_CRC16) {
1346 fcs = crc16(0, (u8 *)skb->data,
1347 skb->len - L2CAP_FCS_SIZE);
1348 put_unaligned_le16(fcs,
1349 skb->data + skb->len - L2CAP_FCS_SIZE);
1352 l2cap_do_send(chan, skb);
1354 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1358 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1360 struct sk_buff *skb, *tx_skb;
1361 u16 fcs;
1362 u32 control;
1364 skb = skb_peek(&chan->tx_q);
1365 if (!skb)
1366 return;
1368 while (bt_cb(skb)->tx_seq != tx_seq) {
1369 if (skb_queue_is_last(&chan->tx_q, skb))
1370 return;
1372 skb = skb_queue_next(&chan->tx_q, skb);
1375 if (chan->remote_max_tx &&
1376 bt_cb(skb)->retries == chan->remote_max_tx) {
1377 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1378 return;
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382 bt_cb(skb)->retries++;
1384 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1385 control &= __get_sar_mask(chan);
1387 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1388 control |= __set_ctrl_final(chan);
1390 control |= __set_reqseq(chan, chan->buffer_seq);
1391 control |= __set_txseq(chan, tx_seq);
1393 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1395 if (chan->fcs == L2CAP_FCS_CRC16) {
1396 fcs = crc16(0, (u8 *)tx_skb->data,
1397 tx_skb->len - L2CAP_FCS_SIZE);
1398 put_unaligned_le16(fcs,
1399 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1402 l2cap_do_send(chan, tx_skb);
1405 static int l2cap_ertm_send(struct l2cap_chan *chan)
1407 struct sk_buff *skb, *tx_skb;
1408 u16 fcs;
1409 u32 control;
1410 int nsent = 0;
1412 if (chan->state != BT_CONNECTED)
1413 return -ENOTCONN;
1415 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1417 if (chan->remote_max_tx &&
1418 bt_cb(skb)->retries == chan->remote_max_tx) {
1419 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1420 break;
1423 tx_skb = skb_clone(skb, GFP_ATOMIC);
1425 bt_cb(skb)->retries++;
1427 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1428 control &= __get_sar_mask(chan);
1430 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1431 control |= __set_ctrl_final(chan);
1433 control |= __set_reqseq(chan, chan->buffer_seq);
1434 control |= __set_txseq(chan, chan->next_tx_seq);
1436 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1438 if (chan->fcs == L2CAP_FCS_CRC16) {
1439 fcs = crc16(0, (u8 *)skb->data,
1440 tx_skb->len - L2CAP_FCS_SIZE);
1441 put_unaligned_le16(fcs, skb->data +
1442 tx_skb->len - L2CAP_FCS_SIZE);
1445 l2cap_do_send(chan, tx_skb);
1447 __set_retrans_timer(chan);
1449 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1451 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1453 if (bt_cb(skb)->retries == 1)
1454 chan->unacked_frames++;
1456 chan->frames_sent++;
1458 if (skb_queue_is_last(&chan->tx_q, skb))
1459 chan->tx_send_head = NULL;
1460 else
1461 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1463 nsent++;
1466 return nsent;
1469 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1471 int ret;
1473 if (!skb_queue_empty(&chan->tx_q))
1474 chan->tx_send_head = chan->tx_q.next;
1476 chan->next_tx_seq = chan->expected_ack_seq;
1477 ret = l2cap_ertm_send(chan);
1478 return ret;
1481 static void l2cap_send_ack(struct l2cap_chan *chan)
1483 u32 control = 0;
1485 control |= __set_reqseq(chan, chan->buffer_seq);
1487 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1488 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1489 set_bit(CONN_RNR_SENT, &chan->conn_state);
1490 l2cap_send_sframe(chan, control);
1491 return;
1494 if (l2cap_ertm_send(chan) > 0)
1495 return;
1497 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1498 l2cap_send_sframe(chan, control);
1501 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1503 struct srej_list *tail;
1504 u32 control;
1506 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1507 control |= __set_ctrl_final(chan);
1509 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1510 control |= __set_reqseq(chan, tail->tx_seq);
1512 l2cap_send_sframe(chan, control);
1515 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1517 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1518 struct sk_buff **frag;
1519 int err, sent = 0;
1521 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1522 return -EFAULT;
1524 sent += count;
1525 len -= count;
1527 /* Continuation fragments (no L2CAP header) */
1528 frag = &skb_shinfo(skb)->frag_list;
1529 while (len) {
1530 count = min_t(unsigned int, conn->mtu, len);
1532 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1533 if (!*frag)
1534 return err;
1535 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1536 return -EFAULT;
1538 (*frag)->priority = skb->priority;
1540 sent += count;
1541 len -= count;
1543 frag = &(*frag)->next;
1546 return sent;
1549 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1550 struct msghdr *msg, size_t len,
1551 u32 priority)
1553 struct sock *sk = chan->sk;
1554 struct l2cap_conn *conn = chan->conn;
1555 struct sk_buff *skb;
1556 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1557 struct l2cap_hdr *lh;
1559 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1561 count = min_t(unsigned int, (conn->mtu - hlen), len);
1562 skb = bt_skb_send_alloc(sk, count + hlen,
1563 msg->msg_flags & MSG_DONTWAIT, &err);
1564 if (!skb)
1565 return ERR_PTR(err);
1567 skb->priority = priority;
1569 /* Create L2CAP header */
1570 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1571 lh->cid = cpu_to_le16(chan->dcid);
1572 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1573 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1575 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1576 if (unlikely(err < 0)) {
1577 kfree_skb(skb);
1578 return ERR_PTR(err);
1580 return skb;
1583 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1584 struct msghdr *msg, size_t len,
1585 u32 priority)
1587 struct sock *sk = chan->sk;
1588 struct l2cap_conn *conn = chan->conn;
1589 struct sk_buff *skb;
1590 int err, count, hlen = L2CAP_HDR_SIZE;
1591 struct l2cap_hdr *lh;
1593 BT_DBG("sk %p len %d", sk, (int)len);
1595 count = min_t(unsigned int, (conn->mtu - hlen), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen,
1597 msg->msg_flags & MSG_DONTWAIT, &err);
1598 if (!skb)
1599 return ERR_PTR(err);
1601 skb->priority = priority;
1603 /* Create L2CAP header */
1604 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1605 lh->cid = cpu_to_le16(chan->dcid);
1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1608 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1609 if (unlikely(err < 0)) {
1610 kfree_skb(skb);
1611 return ERR_PTR(err);
1613 return skb;
1616 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1617 struct msghdr *msg, size_t len,
1618 u32 control, u16 sdulen)
1620 struct sock *sk = chan->sk;
1621 struct l2cap_conn *conn = chan->conn;
1622 struct sk_buff *skb;
1623 int err, count, hlen;
1624 struct l2cap_hdr *lh;
1626 BT_DBG("sk %p len %d", sk, (int)len);
1628 if (!conn)
1629 return ERR_PTR(-ENOTCONN);
1631 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1632 hlen = L2CAP_EXT_HDR_SIZE;
1633 else
1634 hlen = L2CAP_ENH_HDR_SIZE;
1636 if (sdulen)
1637 hlen += L2CAP_SDULEN_SIZE;
1639 if (chan->fcs == L2CAP_FCS_CRC16)
1640 hlen += L2CAP_FCS_SIZE;
1642 count = min_t(unsigned int, (conn->mtu - hlen), len);
1643 skb = bt_skb_send_alloc(sk, count + hlen,
1644 msg->msg_flags & MSG_DONTWAIT, &err);
1645 if (!skb)
1646 return ERR_PTR(err);
1648 /* Create L2CAP header */
1649 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1650 lh->cid = cpu_to_le16(chan->dcid);
1651 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1653 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1655 if (sdulen)
1656 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1658 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1659 if (unlikely(err < 0)) {
1660 kfree_skb(skb);
1661 return ERR_PTR(err);
1664 if (chan->fcs == L2CAP_FCS_CRC16)
1665 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1667 bt_cb(skb)->retries = 0;
1668 return skb;
1671 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1673 struct sk_buff *skb;
1674 struct sk_buff_head sar_queue;
1675 u32 control;
1676 size_t size = 0;
1678 skb_queue_head_init(&sar_queue);
1679 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1680 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1681 if (IS_ERR(skb))
1682 return PTR_ERR(skb);
1684 __skb_queue_tail(&sar_queue, skb);
1685 len -= chan->remote_mps;
1686 size += chan->remote_mps;
1688 while (len > 0) {
1689 size_t buflen;
1691 if (len > chan->remote_mps) {
1692 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1693 buflen = chan->remote_mps;
1694 } else {
1695 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1696 buflen = len;
1699 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1700 if (IS_ERR(skb)) {
1701 skb_queue_purge(&sar_queue);
1702 return PTR_ERR(skb);
1705 __skb_queue_tail(&sar_queue, skb);
1706 len -= buflen;
1707 size += buflen;
1709 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1710 if (chan->tx_send_head == NULL)
1711 chan->tx_send_head = sar_queue.next;
1713 return size;
1716 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1717 u32 priority)
1719 struct sk_buff *skb;
1720 u32 control;
1721 int err;
1723 /* Connectionless channel */
1724 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1725 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1726 if (IS_ERR(skb))
1727 return PTR_ERR(skb);
1729 l2cap_do_send(chan, skb);
1730 return len;
1733 switch (chan->mode) {
1734 case L2CAP_MODE_BASIC:
1735 /* Check outgoing MTU */
1736 if (len > chan->omtu)
1737 return -EMSGSIZE;
1739 /* Create a basic PDU */
1740 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1741 if (IS_ERR(skb))
1742 return PTR_ERR(skb);
1744 l2cap_do_send(chan, skb);
1745 err = len;
1746 break;
1748 case L2CAP_MODE_ERTM:
1749 case L2CAP_MODE_STREAMING:
1750 /* Entire SDU fits into one PDU */
1751 if (len <= chan->remote_mps) {
1752 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1753 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1755 if (IS_ERR(skb))
1756 return PTR_ERR(skb);
1758 __skb_queue_tail(&chan->tx_q, skb);
1760 if (chan->tx_send_head == NULL)
1761 chan->tx_send_head = skb;
1763 } else {
1764 /* Segment SDU into multiples PDUs */
1765 err = l2cap_sar_segment_sdu(chan, msg, len);
1766 if (err < 0)
1767 return err;
1770 if (chan->mode == L2CAP_MODE_STREAMING) {
1771 l2cap_streaming_send(chan);
1772 err = len;
1773 break;
1776 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1777 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1778 err = len;
1779 break;
1782 err = l2cap_ertm_send(chan);
1783 if (err >= 0)
1784 err = len;
1786 break;
1788 default:
1789 BT_DBG("bad state %1.1x", chan->mode);
1790 err = -EBADFD;
1793 return err;
1796 /* Copy frame to all raw sockets on that connection */
1797 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1799 struct sk_buff *nskb;
1800 struct l2cap_chan *chan;
1802 BT_DBG("conn %p", conn);
1804 rcu_read_lock();
1806 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1807 struct sock *sk = chan->sk;
1808 if (chan->chan_type != L2CAP_CHAN_RAW)
1809 continue;
1811 /* Don't send frame to the socket it came from */
1812 if (skb->sk == sk)
1813 continue;
1814 nskb = skb_clone(skb, GFP_ATOMIC);
1815 if (!nskb)
1816 continue;
1818 if (chan->ops->recv(chan->data, nskb))
1819 kfree_skb(nskb);
1822 rcu_read_unlock();
1825 /* ---- L2CAP signalling commands ---- */
1826 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1827 u8 code, u8 ident, u16 dlen, void *data)
1829 struct sk_buff *skb, **frag;
1830 struct l2cap_cmd_hdr *cmd;
1831 struct l2cap_hdr *lh;
1832 int len, count;
1834 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1835 conn, code, ident, dlen);
1837 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1838 count = min_t(unsigned int, conn->mtu, len);
1840 skb = bt_skb_alloc(count, GFP_ATOMIC);
1841 if (!skb)
1842 return NULL;
1844 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1845 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1847 if (conn->hcon->type == LE_LINK)
1848 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1849 else
1850 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1852 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1853 cmd->code = code;
1854 cmd->ident = ident;
1855 cmd->len = cpu_to_le16(dlen);
1857 if (dlen) {
1858 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1859 memcpy(skb_put(skb, count), data, count);
1860 data += count;
1863 len -= skb->len;
1865 /* Continuation fragments (no L2CAP header) */
1866 frag = &skb_shinfo(skb)->frag_list;
1867 while (len) {
1868 count = min_t(unsigned int, conn->mtu, len);
1870 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1871 if (!*frag)
1872 goto fail;
1874 memcpy(skb_put(*frag, count), data, count);
1876 len -= count;
1877 data += count;
1879 frag = &(*frag)->next;
1882 return skb;
1884 fail:
1885 kfree_skb(skb);
1886 return NULL;
1889 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1891 struct l2cap_conf_opt *opt = *ptr;
1892 int len;
1894 len = L2CAP_CONF_OPT_SIZE + opt->len;
1895 *ptr += len;
1897 *type = opt->type;
1898 *olen = opt->len;
1900 switch (opt->len) {
1901 case 1:
1902 *val = *((u8 *) opt->val);
1903 break;
1905 case 2:
1906 *val = get_unaligned_le16(opt->val);
1907 break;
1909 case 4:
1910 *val = get_unaligned_le32(opt->val);
1911 break;
1913 default:
1914 *val = (unsigned long) opt->val;
1915 break;
1918 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1919 return len;
1922 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1924 struct l2cap_conf_opt *opt = *ptr;
1926 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1928 opt->type = type;
1929 opt->len = len;
1931 switch (len) {
1932 case 1:
1933 *((u8 *) opt->val) = val;
1934 break;
1936 case 2:
1937 put_unaligned_le16(val, opt->val);
1938 break;
1940 case 4:
1941 put_unaligned_le32(val, opt->val);
1942 break;
1944 default:
1945 memcpy(opt->val, (void *) val, len);
1946 break;
1949 *ptr += L2CAP_CONF_OPT_SIZE + len;
1952 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1954 struct l2cap_conf_efs efs;
1956 switch (chan->mode) {
1957 case L2CAP_MODE_ERTM:
1958 efs.id = chan->local_id;
1959 efs.stype = chan->local_stype;
1960 efs.msdu = cpu_to_le16(chan->local_msdu);
1961 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1962 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1963 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1964 break;
1966 case L2CAP_MODE_STREAMING:
1967 efs.id = 1;
1968 efs.stype = L2CAP_SERV_BESTEFFORT;
1969 efs.msdu = cpu_to_le16(chan->local_msdu);
1970 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1971 efs.acc_lat = 0;
1972 efs.flush_to = 0;
1973 break;
1975 default:
1976 return;
1979 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1980 (unsigned long) &efs);
1983 static void l2cap_ack_timeout(struct work_struct *work)
1985 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1986 ack_timer.work);
1988 BT_DBG("chan %p", chan);
1990 lock_sock(chan->sk);
1991 l2cap_send_ack(chan);
1992 release_sock(chan->sk);
1995 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1997 chan->expected_ack_seq = 0;
1998 chan->unacked_frames = 0;
1999 chan->buffer_seq = 0;
2000 chan->num_acked = 0;
2001 chan->frames_sent = 0;
2003 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2004 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2005 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2007 skb_queue_head_init(&chan->srej_q);
2009 INIT_LIST_HEAD(&chan->srej_l);
2012 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2014 switch (mode) {
2015 case L2CAP_MODE_STREAMING:
2016 case L2CAP_MODE_ERTM:
2017 if (l2cap_mode_supported(mode, remote_feat_mask))
2018 return mode;
2019 /* fall through */
2020 default:
2021 return L2CAP_MODE_BASIC;
2025 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2027 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2030 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2032 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2035 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2037 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2038 __l2cap_ews_supported(chan)) {
2039 /* use extended control field */
2040 set_bit(FLAG_EXT_CTRL, &chan->flags);
2041 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2042 } else {
2043 chan->tx_win = min_t(u16, chan->tx_win,
2044 L2CAP_DEFAULT_TX_WINDOW);
2045 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2049 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2051 struct l2cap_conf_req *req = data;
2052 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2053 void *ptr = req->data;
2054 u16 size;
2056 BT_DBG("chan %p", chan);
2058 if (chan->num_conf_req || chan->num_conf_rsp)
2059 goto done;
2061 switch (chan->mode) {
2062 case L2CAP_MODE_STREAMING:
2063 case L2CAP_MODE_ERTM:
2064 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2065 break;
2067 if (__l2cap_efs_supported(chan))
2068 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2070 /* fall through */
2071 default:
2072 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2073 break;
2076 done:
2077 if (chan->imtu != L2CAP_DEFAULT_MTU)
2078 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2080 switch (chan->mode) {
2081 case L2CAP_MODE_BASIC:
2082 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2083 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2084 break;
2086 rfc.mode = L2CAP_MODE_BASIC;
2087 rfc.txwin_size = 0;
2088 rfc.max_transmit = 0;
2089 rfc.retrans_timeout = 0;
2090 rfc.monitor_timeout = 0;
2091 rfc.max_pdu_size = 0;
2093 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2094 (unsigned long) &rfc);
2095 break;
2097 case L2CAP_MODE_ERTM:
2098 rfc.mode = L2CAP_MODE_ERTM;
2099 rfc.max_transmit = chan->max_tx;
2100 rfc.retrans_timeout = 0;
2101 rfc.monitor_timeout = 0;
2103 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2104 L2CAP_EXT_HDR_SIZE -
2105 L2CAP_SDULEN_SIZE -
2106 L2CAP_FCS_SIZE);
2107 rfc.max_pdu_size = cpu_to_le16(size);
2109 l2cap_txwin_setup(chan);
2111 rfc.txwin_size = min_t(u16, chan->tx_win,
2112 L2CAP_DEFAULT_TX_WINDOW);
2114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2115 (unsigned long) &rfc);
2117 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2118 l2cap_add_opt_efs(&ptr, chan);
2120 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2121 break;
2123 if (chan->fcs == L2CAP_FCS_NONE ||
2124 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2125 chan->fcs = L2CAP_FCS_NONE;
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2129 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2130 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2131 chan->tx_win);
2132 break;
2134 case L2CAP_MODE_STREAMING:
2135 rfc.mode = L2CAP_MODE_STREAMING;
2136 rfc.txwin_size = 0;
2137 rfc.max_transmit = 0;
2138 rfc.retrans_timeout = 0;
2139 rfc.monitor_timeout = 0;
2141 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2142 L2CAP_EXT_HDR_SIZE -
2143 L2CAP_SDULEN_SIZE -
2144 L2CAP_FCS_SIZE);
2145 rfc.max_pdu_size = cpu_to_le16(size);
2147 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2148 (unsigned long) &rfc);
2150 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2151 l2cap_add_opt_efs(&ptr, chan);
2153 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2154 break;
2156 if (chan->fcs == L2CAP_FCS_NONE ||
2157 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2158 chan->fcs = L2CAP_FCS_NONE;
2159 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2161 break;
2164 req->dcid = cpu_to_le16(chan->dcid);
2165 req->flags = cpu_to_le16(0);
2167 return ptr - data;
2170 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2172 struct l2cap_conf_rsp *rsp = data;
2173 void *ptr = rsp->data;
2174 void *req = chan->conf_req;
2175 int len = chan->conf_len;
2176 int type, hint, olen;
2177 unsigned long val;
2178 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2179 struct l2cap_conf_efs efs;
2180 u8 remote_efs = 0;
2181 u16 mtu = L2CAP_DEFAULT_MTU;
2182 u16 result = L2CAP_CONF_SUCCESS;
2183 u16 size;
2185 BT_DBG("chan %p", chan);
2187 while (len >= L2CAP_CONF_OPT_SIZE) {
2188 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2190 hint = type & L2CAP_CONF_HINT;
2191 type &= L2CAP_CONF_MASK;
2193 switch (type) {
2194 case L2CAP_CONF_MTU:
2195 mtu = val;
2196 break;
2198 case L2CAP_CONF_FLUSH_TO:
2199 chan->flush_to = val;
2200 break;
2202 case L2CAP_CONF_QOS:
2203 break;
2205 case L2CAP_CONF_RFC:
2206 if (olen == sizeof(rfc))
2207 memcpy(&rfc, (void *) val, olen);
2208 break;
2210 case L2CAP_CONF_FCS:
2211 if (val == L2CAP_FCS_NONE)
2212 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2213 break;
2215 case L2CAP_CONF_EFS:
2216 remote_efs = 1;
2217 if (olen == sizeof(efs))
2218 memcpy(&efs, (void *) val, olen);
2219 break;
2221 case L2CAP_CONF_EWS:
2222 if (!enable_hs)
2223 return -ECONNREFUSED;
2225 set_bit(FLAG_EXT_CTRL, &chan->flags);
2226 set_bit(CONF_EWS_RECV, &chan->conf_state);
2227 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2228 chan->remote_tx_win = val;
2229 break;
2231 default:
2232 if (hint)
2233 break;
2235 result = L2CAP_CONF_UNKNOWN;
2236 *((u8 *) ptr++) = type;
2237 break;
2241 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2242 goto done;
2244 switch (chan->mode) {
2245 case L2CAP_MODE_STREAMING:
2246 case L2CAP_MODE_ERTM:
2247 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2248 chan->mode = l2cap_select_mode(rfc.mode,
2249 chan->conn->feat_mask);
2250 break;
2253 if (remote_efs) {
2254 if (__l2cap_efs_supported(chan))
2255 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2256 else
2257 return -ECONNREFUSED;
2260 if (chan->mode != rfc.mode)
2261 return -ECONNREFUSED;
2263 break;
2266 done:
2267 if (chan->mode != rfc.mode) {
2268 result = L2CAP_CONF_UNACCEPT;
2269 rfc.mode = chan->mode;
2271 if (chan->num_conf_rsp == 1)
2272 return -ECONNREFUSED;
2274 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2275 sizeof(rfc), (unsigned long) &rfc);
2278 if (result == L2CAP_CONF_SUCCESS) {
2279 /* Configure output options and let the other side know
2280 * which ones we don't like. */
2282 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2283 result = L2CAP_CONF_UNACCEPT;
2284 else {
2285 chan->omtu = mtu;
2286 set_bit(CONF_MTU_DONE, &chan->conf_state);
2288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2290 if (remote_efs) {
2291 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2292 efs.stype != L2CAP_SERV_NOTRAFIC &&
2293 efs.stype != chan->local_stype) {
2295 result = L2CAP_CONF_UNACCEPT;
2297 if (chan->num_conf_req >= 1)
2298 return -ECONNREFUSED;
2300 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2301 sizeof(efs),
2302 (unsigned long) &efs);
2303 } else {
2304 /* Send PENDING Conf Rsp */
2305 result = L2CAP_CONF_PENDING;
2306 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2310 switch (rfc.mode) {
2311 case L2CAP_MODE_BASIC:
2312 chan->fcs = L2CAP_FCS_NONE;
2313 set_bit(CONF_MODE_DONE, &chan->conf_state);
2314 break;
2316 case L2CAP_MODE_ERTM:
2317 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2318 chan->remote_tx_win = rfc.txwin_size;
2319 else
2320 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2322 chan->remote_max_tx = rfc.max_transmit;
2324 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2325 chan->conn->mtu -
2326 L2CAP_EXT_HDR_SIZE -
2327 L2CAP_SDULEN_SIZE -
2328 L2CAP_FCS_SIZE);
2329 rfc.max_pdu_size = cpu_to_le16(size);
2330 chan->remote_mps = size;
2332 rfc.retrans_timeout =
2333 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2334 rfc.monitor_timeout =
2335 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2337 set_bit(CONF_MODE_DONE, &chan->conf_state);
2339 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2340 sizeof(rfc), (unsigned long) &rfc);
2342 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2343 chan->remote_id = efs.id;
2344 chan->remote_stype = efs.stype;
2345 chan->remote_msdu = le16_to_cpu(efs.msdu);
2346 chan->remote_flush_to =
2347 le32_to_cpu(efs.flush_to);
2348 chan->remote_acc_lat =
2349 le32_to_cpu(efs.acc_lat);
2350 chan->remote_sdu_itime =
2351 le32_to_cpu(efs.sdu_itime);
2352 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2353 sizeof(efs), (unsigned long) &efs);
2355 break;
2357 case L2CAP_MODE_STREAMING:
2358 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2359 chan->conn->mtu -
2360 L2CAP_EXT_HDR_SIZE -
2361 L2CAP_SDULEN_SIZE -
2362 L2CAP_FCS_SIZE);
2363 rfc.max_pdu_size = cpu_to_le16(size);
2364 chan->remote_mps = size;
2366 set_bit(CONF_MODE_DONE, &chan->conf_state);
2368 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2369 sizeof(rfc), (unsigned long) &rfc);
2371 break;
2373 default:
2374 result = L2CAP_CONF_UNACCEPT;
2376 memset(&rfc, 0, sizeof(rfc));
2377 rfc.mode = chan->mode;
2380 if (result == L2CAP_CONF_SUCCESS)
2381 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2383 rsp->scid = cpu_to_le16(chan->dcid);
2384 rsp->result = cpu_to_le16(result);
2385 rsp->flags = cpu_to_le16(0x0000);
2387 return ptr - data;
2390 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2392 struct l2cap_conf_req *req = data;
2393 void *ptr = req->data;
2394 int type, olen;
2395 unsigned long val;
2396 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2397 struct l2cap_conf_efs efs;
2399 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2401 while (len >= L2CAP_CONF_OPT_SIZE) {
2402 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2404 switch (type) {
2405 case L2CAP_CONF_MTU:
2406 if (val < L2CAP_DEFAULT_MIN_MTU) {
2407 *result = L2CAP_CONF_UNACCEPT;
2408 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2409 } else
2410 chan->imtu = val;
2411 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2412 break;
2414 case L2CAP_CONF_FLUSH_TO:
2415 chan->flush_to = val;
2416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2417 2, chan->flush_to);
2418 break;
2420 case L2CAP_CONF_RFC:
2421 if (olen == sizeof(rfc))
2422 memcpy(&rfc, (void *)val, olen);
2424 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2425 rfc.mode != chan->mode)
2426 return -ECONNREFUSED;
2428 chan->fcs = 0;
2430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2431 sizeof(rfc), (unsigned long) &rfc);
2432 break;
2434 case L2CAP_CONF_EWS:
2435 chan->tx_win = min_t(u16, val,
2436 L2CAP_DEFAULT_EXT_WINDOW);
2437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2438 chan->tx_win);
2439 break;
2441 case L2CAP_CONF_EFS:
2442 if (olen == sizeof(efs))
2443 memcpy(&efs, (void *)val, olen);
2445 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2446 efs.stype != L2CAP_SERV_NOTRAFIC &&
2447 efs.stype != chan->local_stype)
2448 return -ECONNREFUSED;
2450 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2451 sizeof(efs), (unsigned long) &efs);
2452 break;
2456 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2457 return -ECONNREFUSED;
2459 chan->mode = rfc.mode;
2461 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2462 switch (rfc.mode) {
2463 case L2CAP_MODE_ERTM:
2464 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2465 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2466 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2468 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2469 chan->local_msdu = le16_to_cpu(efs.msdu);
2470 chan->local_sdu_itime =
2471 le32_to_cpu(efs.sdu_itime);
2472 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2473 chan->local_flush_to =
2474 le32_to_cpu(efs.flush_to);
2476 break;
2478 case L2CAP_MODE_STREAMING:
2479 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2483 req->dcid = cpu_to_le16(chan->dcid);
2484 req->flags = cpu_to_le16(0x0000);
2486 return ptr - data;
2489 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2491 struct l2cap_conf_rsp *rsp = data;
2492 void *ptr = rsp->data;
2494 BT_DBG("chan %p", chan);
2496 rsp->scid = cpu_to_le16(chan->dcid);
2497 rsp->result = cpu_to_le16(result);
2498 rsp->flags = cpu_to_le16(flags);
2500 return ptr - data;
2503 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2505 struct l2cap_conn_rsp rsp;
2506 struct l2cap_conn *conn = chan->conn;
2507 u8 buf[128];
2509 rsp.scid = cpu_to_le16(chan->dcid);
2510 rsp.dcid = cpu_to_le16(chan->scid);
2511 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2512 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2513 l2cap_send_cmd(conn, chan->ident,
2514 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2516 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2517 return;
2519 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2520 l2cap_build_conf_req(chan, buf), buf);
2521 chan->num_conf_req++;
2524 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2526 int type, olen;
2527 unsigned long val;
2528 struct l2cap_conf_rfc rfc;
2530 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2532 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2533 return;
2535 while (len >= L2CAP_CONF_OPT_SIZE) {
2536 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2538 switch (type) {
2539 case L2CAP_CONF_RFC:
2540 if (olen == sizeof(rfc))
2541 memcpy(&rfc, (void *)val, olen);
2542 goto done;
2546 /* Use sane default values in case a misbehaving remote device
2547 * did not send an RFC option.
2549 rfc.mode = chan->mode;
2550 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2551 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2552 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2554 BT_ERR("Expected RFC option was not found, using defaults");
2556 done:
2557 switch (rfc.mode) {
2558 case L2CAP_MODE_ERTM:
2559 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2560 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2561 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2562 break;
2563 case L2CAP_MODE_STREAMING:
2564 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2568 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2570 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2572 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2573 return 0;
2575 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2576 cmd->ident == conn->info_ident) {
2577 __cancel_delayed_work(&conn->info_timer);
2579 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2580 conn->info_ident = 0;
2582 l2cap_conn_start(conn);
2585 return 0;
2588 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2590 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2591 struct l2cap_conn_rsp rsp;
2592 struct l2cap_chan *chan = NULL, *pchan;
2593 struct sock *parent, *sk = NULL;
2594 int result, status = L2CAP_CS_NO_INFO;
2596 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2597 __le16 psm = req->psm;
2599 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2601 /* Check if we have socket listening on psm */
2602 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2603 if (!pchan) {
2604 result = L2CAP_CR_BAD_PSM;
2605 goto sendresp;
2608 parent = pchan->sk;
2610 lock_sock(parent);
2612 /* Check if the ACL is secure enough (if not SDP) */
2613 if (psm != cpu_to_le16(0x0001) &&
2614 !hci_conn_check_link_mode(conn->hcon)) {
2615 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2616 result = L2CAP_CR_SEC_BLOCK;
2617 goto response;
2620 result = L2CAP_CR_NO_MEM;
2622 /* Check for backlog size */
2623 if (sk_acceptq_is_full(parent)) {
2624 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2625 goto response;
2628 chan = pchan->ops->new_connection(pchan->data);
2629 if (!chan)
2630 goto response;
2632 sk = chan->sk;
2634 /* Check if we already have channel with that dcid */
2635 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2636 sock_set_flag(sk, SOCK_ZAPPED);
2637 chan->ops->close(chan->data);
2638 goto response;
2641 hci_conn_hold(conn->hcon);
2643 bacpy(&bt_sk(sk)->src, conn->src);
2644 bacpy(&bt_sk(sk)->dst, conn->dst);
2645 chan->psm = psm;
2646 chan->dcid = scid;
2648 bt_accept_enqueue(parent, sk);
2650 l2cap_chan_add(conn, chan);
2652 dcid = chan->scid;
2654 __set_chan_timer(chan, sk->sk_sndtimeo);
2656 chan->ident = cmd->ident;
2658 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2659 if (l2cap_chan_check_security(chan)) {
2660 if (bt_sk(sk)->defer_setup) {
2661 l2cap_state_change(chan, BT_CONNECT2);
2662 result = L2CAP_CR_PEND;
2663 status = L2CAP_CS_AUTHOR_PEND;
2664 parent->sk_data_ready(parent, 0);
2665 } else {
2666 l2cap_state_change(chan, BT_CONFIG);
2667 result = L2CAP_CR_SUCCESS;
2668 status = L2CAP_CS_NO_INFO;
2670 } else {
2671 l2cap_state_change(chan, BT_CONNECT2);
2672 result = L2CAP_CR_PEND;
2673 status = L2CAP_CS_AUTHEN_PEND;
2675 } else {
2676 l2cap_state_change(chan, BT_CONNECT2);
2677 result = L2CAP_CR_PEND;
2678 status = L2CAP_CS_NO_INFO;
2681 response:
2682 release_sock(parent);
2684 sendresp:
2685 rsp.scid = cpu_to_le16(scid);
2686 rsp.dcid = cpu_to_le16(dcid);
2687 rsp.result = cpu_to_le16(result);
2688 rsp.status = cpu_to_le16(status);
2689 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2691 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2692 struct l2cap_info_req info;
2693 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2695 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2696 conn->info_ident = l2cap_get_ident(conn);
2698 schedule_delayed_work(&conn->info_timer,
2699 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2701 l2cap_send_cmd(conn, conn->info_ident,
2702 L2CAP_INFO_REQ, sizeof(info), &info);
2705 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2706 result == L2CAP_CR_SUCCESS) {
2707 u8 buf[128];
2708 set_bit(CONF_REQ_SENT, &chan->conf_state);
2709 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2710 l2cap_build_conf_req(chan, buf), buf);
2711 chan->num_conf_req++;
2714 return 0;
2717 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2719 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2720 u16 scid, dcid, result, status;
2721 struct l2cap_chan *chan;
2722 struct sock *sk;
2723 u8 req[128];
2725 scid = __le16_to_cpu(rsp->scid);
2726 dcid = __le16_to_cpu(rsp->dcid);
2727 result = __le16_to_cpu(rsp->result);
2728 status = __le16_to_cpu(rsp->status);
2730 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2732 if (scid) {
2733 chan = l2cap_get_chan_by_scid(conn, scid);
2734 if (!chan)
2735 return -EFAULT;
2736 } else {
2737 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2738 if (!chan)
2739 return -EFAULT;
2742 sk = chan->sk;
2744 switch (result) {
2745 case L2CAP_CR_SUCCESS:
2746 l2cap_state_change(chan, BT_CONFIG);
2747 chan->ident = 0;
2748 chan->dcid = dcid;
2749 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2751 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2752 break;
2754 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2755 l2cap_build_conf_req(chan, req), req);
2756 chan->num_conf_req++;
2757 break;
2759 case L2CAP_CR_PEND:
2760 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2761 break;
2763 default:
2764 l2cap_chan_del(chan, ECONNREFUSED);
2765 break;
2768 release_sock(sk);
2769 return 0;
2772 static inline void set_default_fcs(struct l2cap_chan *chan)
2774 /* FCS is enabled only in ERTM or streaming mode, if one or both
2775 * sides request it.
2777 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2778 chan->fcs = L2CAP_FCS_NONE;
2779 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2780 chan->fcs = L2CAP_FCS_CRC16;
2783 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2785 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2786 u16 dcid, flags;
2787 u8 rsp[64];
2788 struct l2cap_chan *chan;
2789 struct sock *sk;
2790 int len;
2792 dcid = __le16_to_cpu(req->dcid);
2793 flags = __le16_to_cpu(req->flags);
2795 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2797 chan = l2cap_get_chan_by_scid(conn, dcid);
2798 if (!chan)
2799 return -ENOENT;
2801 sk = chan->sk;
2803 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2804 struct l2cap_cmd_rej_cid rej;
2806 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2807 rej.scid = cpu_to_le16(chan->scid);
2808 rej.dcid = cpu_to_le16(chan->dcid);
2810 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2811 sizeof(rej), &rej);
2812 goto unlock;
2815 /* Reject if config buffer is too small. */
2816 len = cmd_len - sizeof(*req);
2817 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2818 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2819 l2cap_build_conf_rsp(chan, rsp,
2820 L2CAP_CONF_REJECT, flags), rsp);
2821 goto unlock;
2824 /* Store config. */
2825 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2826 chan->conf_len += len;
2828 if (flags & 0x0001) {
2829 /* Incomplete config. Send empty response. */
2830 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2831 l2cap_build_conf_rsp(chan, rsp,
2832 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2833 goto unlock;
2836 /* Complete config. */
2837 len = l2cap_parse_conf_req(chan, rsp);
2838 if (len < 0) {
2839 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2840 goto unlock;
2843 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2844 chan->num_conf_rsp++;
2846 /* Reset config buffer. */
2847 chan->conf_len = 0;
2849 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2850 goto unlock;
2852 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2853 set_default_fcs(chan);
2855 l2cap_state_change(chan, BT_CONNECTED);
2857 chan->next_tx_seq = 0;
2858 chan->expected_tx_seq = 0;
2859 skb_queue_head_init(&chan->tx_q);
2860 if (chan->mode == L2CAP_MODE_ERTM)
2861 l2cap_ertm_init(chan);
2863 l2cap_chan_ready(sk);
2864 goto unlock;
2867 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2868 u8 buf[64];
2869 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2870 l2cap_build_conf_req(chan, buf), buf);
2871 chan->num_conf_req++;
2874 /* Got Conf Rsp PENDING from remote side and asume we sent
2875 Conf Rsp PENDING in the code above */
2876 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2877 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2879 /* check compatibility */
2881 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2882 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2884 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2885 l2cap_build_conf_rsp(chan, rsp,
2886 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2889 unlock:
2890 release_sock(sk);
2891 return 0;
2894 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2896 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2897 u16 scid, flags, result;
2898 struct l2cap_chan *chan;
2899 struct sock *sk;
2900 int len = cmd->len - sizeof(*rsp);
2902 scid = __le16_to_cpu(rsp->scid);
2903 flags = __le16_to_cpu(rsp->flags);
2904 result = __le16_to_cpu(rsp->result);
2906 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2907 scid, flags, result);
2909 chan = l2cap_get_chan_by_scid(conn, scid);
2910 if (!chan)
2911 return 0;
2913 sk = chan->sk;
2915 switch (result) {
2916 case L2CAP_CONF_SUCCESS:
2917 l2cap_conf_rfc_get(chan, rsp->data, len);
2918 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2919 break;
2921 case L2CAP_CONF_PENDING:
2922 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2924 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2925 char buf[64];
2927 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2928 buf, &result);
2929 if (len < 0) {
2930 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2931 goto done;
2934 /* check compatibility */
2936 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2937 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2939 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2940 l2cap_build_conf_rsp(chan, buf,
2941 L2CAP_CONF_SUCCESS, 0x0000), buf);
2943 goto done;
2945 case L2CAP_CONF_UNACCEPT:
2946 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2947 char req[64];
2949 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2950 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2951 goto done;
2954 /* throw out any old stored conf requests */
2955 result = L2CAP_CONF_SUCCESS;
2956 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2957 req, &result);
2958 if (len < 0) {
2959 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2960 goto done;
2963 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2964 L2CAP_CONF_REQ, len, req);
2965 chan->num_conf_req++;
2966 if (result != L2CAP_CONF_SUCCESS)
2967 goto done;
2968 break;
2971 default:
2972 sk->sk_err = ECONNRESET;
2973 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2974 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2975 goto done;
2978 if (flags & 0x01)
2979 goto done;
2981 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2983 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2984 set_default_fcs(chan);
2986 l2cap_state_change(chan, BT_CONNECTED);
2987 chan->next_tx_seq = 0;
2988 chan->expected_tx_seq = 0;
2989 skb_queue_head_init(&chan->tx_q);
2990 if (chan->mode == L2CAP_MODE_ERTM)
2991 l2cap_ertm_init(chan);
2993 l2cap_chan_ready(sk);
2996 done:
2997 release_sock(sk);
2998 return 0;
3001 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3003 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3004 struct l2cap_disconn_rsp rsp;
3005 u16 dcid, scid;
3006 struct l2cap_chan *chan;
3007 struct sock *sk;
3009 scid = __le16_to_cpu(req->scid);
3010 dcid = __le16_to_cpu(req->dcid);
3012 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3014 chan = l2cap_get_chan_by_scid(conn, dcid);
3015 if (!chan)
3016 return 0;
3018 sk = chan->sk;
3020 rsp.dcid = cpu_to_le16(chan->scid);
3021 rsp.scid = cpu_to_le16(chan->dcid);
3022 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3024 sk->sk_shutdown = SHUTDOWN_MASK;
3026 l2cap_chan_del(chan, ECONNRESET);
3027 release_sock(sk);
3029 chan->ops->close(chan->data);
3030 return 0;
3033 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3035 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3036 u16 dcid, scid;
3037 struct l2cap_chan *chan;
3038 struct sock *sk;
3040 scid = __le16_to_cpu(rsp->scid);
3041 dcid = __le16_to_cpu(rsp->dcid);
3043 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3045 chan = l2cap_get_chan_by_scid(conn, scid);
3046 if (!chan)
3047 return 0;
3049 sk = chan->sk;
3051 l2cap_chan_del(chan, 0);
3052 release_sock(sk);
3054 chan->ops->close(chan->data);
3055 return 0;
3058 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3060 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3061 u16 type;
3063 type = __le16_to_cpu(req->type);
3065 BT_DBG("type 0x%4.4x", type);
3067 if (type == L2CAP_IT_FEAT_MASK) {
3068 u8 buf[8];
3069 u32 feat_mask = l2cap_feat_mask;
3070 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3071 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3072 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3073 if (!disable_ertm)
3074 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3075 | L2CAP_FEAT_FCS;
3076 if (enable_hs)
3077 feat_mask |= L2CAP_FEAT_EXT_FLOW
3078 | L2CAP_FEAT_EXT_WINDOW;
3080 put_unaligned_le32(feat_mask, rsp->data);
3081 l2cap_send_cmd(conn, cmd->ident,
3082 L2CAP_INFO_RSP, sizeof(buf), buf);
3083 } else if (type == L2CAP_IT_FIXED_CHAN) {
3084 u8 buf[12];
3085 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3087 if (enable_hs)
3088 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3089 else
3090 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3092 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3093 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3094 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3095 l2cap_send_cmd(conn, cmd->ident,
3096 L2CAP_INFO_RSP, sizeof(buf), buf);
3097 } else {
3098 struct l2cap_info_rsp rsp;
3099 rsp.type = cpu_to_le16(type);
3100 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3101 l2cap_send_cmd(conn, cmd->ident,
3102 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3105 return 0;
3108 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3110 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3111 u16 type, result;
3113 type = __le16_to_cpu(rsp->type);
3114 result = __le16_to_cpu(rsp->result);
3116 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3118 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3119 if (cmd->ident != conn->info_ident ||
3120 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3121 return 0;
3123 __cancel_delayed_work(&conn->info_timer);
3125 if (result != L2CAP_IR_SUCCESS) {
3126 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3127 conn->info_ident = 0;
3129 l2cap_conn_start(conn);
3131 return 0;
3134 if (type == L2CAP_IT_FEAT_MASK) {
3135 conn->feat_mask = get_unaligned_le32(rsp->data);
3137 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3138 struct l2cap_info_req req;
3139 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3141 conn->info_ident = l2cap_get_ident(conn);
3143 l2cap_send_cmd(conn, conn->info_ident,
3144 L2CAP_INFO_REQ, sizeof(req), &req);
3145 } else {
3146 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3147 conn->info_ident = 0;
3149 l2cap_conn_start(conn);
3151 } else if (type == L2CAP_IT_FIXED_CHAN) {
3152 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3153 conn->info_ident = 0;
3155 l2cap_conn_start(conn);
3158 return 0;
3161 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3162 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3163 void *data)
3165 struct l2cap_create_chan_req *req = data;
3166 struct l2cap_create_chan_rsp rsp;
3167 u16 psm, scid;
3169 if (cmd_len != sizeof(*req))
3170 return -EPROTO;
3172 if (!enable_hs)
3173 return -EINVAL;
3175 psm = le16_to_cpu(req->psm);
3176 scid = le16_to_cpu(req->scid);
3178 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3180 /* Placeholder: Always reject */
3181 rsp.dcid = 0;
3182 rsp.scid = cpu_to_le16(scid);
3183 rsp.result = L2CAP_CR_NO_MEM;
3184 rsp.status = L2CAP_CS_NO_INFO;
3186 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3187 sizeof(rsp), &rsp);
3189 return 0;
3192 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3193 struct l2cap_cmd_hdr *cmd, void *data)
3195 BT_DBG("conn %p", conn);
3197 return l2cap_connect_rsp(conn, cmd, data);
3200 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3201 u16 icid, u16 result)
3203 struct l2cap_move_chan_rsp rsp;
3205 BT_DBG("icid %d, result %d", icid, result);
3207 rsp.icid = cpu_to_le16(icid);
3208 rsp.result = cpu_to_le16(result);
3210 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3213 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3214 struct l2cap_chan *chan, u16 icid, u16 result)
3216 struct l2cap_move_chan_cfm cfm;
3217 u8 ident;
3219 BT_DBG("icid %d, result %d", icid, result);
3221 ident = l2cap_get_ident(conn);
3222 if (chan)
3223 chan->ident = ident;
3225 cfm.icid = cpu_to_le16(icid);
3226 cfm.result = cpu_to_le16(result);
3228 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3231 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3232 u16 icid)
3234 struct l2cap_move_chan_cfm_rsp rsp;
3236 BT_DBG("icid %d", icid);
3238 rsp.icid = cpu_to_le16(icid);
3239 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3242 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3243 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3245 struct l2cap_move_chan_req *req = data;
3246 u16 icid = 0;
3247 u16 result = L2CAP_MR_NOT_ALLOWED;
3249 if (cmd_len != sizeof(*req))
3250 return -EPROTO;
3252 icid = le16_to_cpu(req->icid);
3254 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3256 if (!enable_hs)
3257 return -EINVAL;
3259 /* Placeholder: Always refuse */
3260 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3262 return 0;
3265 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3266 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3268 struct l2cap_move_chan_rsp *rsp = data;
3269 u16 icid, result;
3271 if (cmd_len != sizeof(*rsp))
3272 return -EPROTO;
3274 icid = le16_to_cpu(rsp->icid);
3275 result = le16_to_cpu(rsp->result);
3277 BT_DBG("icid %d, result %d", icid, result);
3279 /* Placeholder: Always unconfirmed */
3280 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3282 return 0;
3285 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3286 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3288 struct l2cap_move_chan_cfm *cfm = data;
3289 u16 icid, result;
3291 if (cmd_len != sizeof(*cfm))
3292 return -EPROTO;
3294 icid = le16_to_cpu(cfm->icid);
3295 result = le16_to_cpu(cfm->result);
3297 BT_DBG("icid %d, result %d", icid, result);
3299 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3301 return 0;
3304 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3305 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3307 struct l2cap_move_chan_cfm_rsp *rsp = data;
3308 u16 icid;
3310 if (cmd_len != sizeof(*rsp))
3311 return -EPROTO;
3313 icid = le16_to_cpu(rsp->icid);
3315 BT_DBG("icid %d", icid);
3317 return 0;
3320 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3321 u16 to_multiplier)
3323 u16 max_latency;
3325 if (min > max || min < 6 || max > 3200)
3326 return -EINVAL;
3328 if (to_multiplier < 10 || to_multiplier > 3200)
3329 return -EINVAL;
3331 if (max >= to_multiplier * 8)
3332 return -EINVAL;
3334 max_latency = (to_multiplier * 8 / max) - 1;
3335 if (latency > 499 || latency > max_latency)
3336 return -EINVAL;
3338 return 0;
3341 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3342 struct l2cap_cmd_hdr *cmd, u8 *data)
3344 struct hci_conn *hcon = conn->hcon;
3345 struct l2cap_conn_param_update_req *req;
3346 struct l2cap_conn_param_update_rsp rsp;
3347 u16 min, max, latency, to_multiplier, cmd_len;
3348 int err;
3350 if (!(hcon->link_mode & HCI_LM_MASTER))
3351 return -EINVAL;
3353 cmd_len = __le16_to_cpu(cmd->len);
3354 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3355 return -EPROTO;
3357 req = (struct l2cap_conn_param_update_req *) data;
3358 min = __le16_to_cpu(req->min);
3359 max = __le16_to_cpu(req->max);
3360 latency = __le16_to_cpu(req->latency);
3361 to_multiplier = __le16_to_cpu(req->to_multiplier);
3363 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3364 min, max, latency, to_multiplier);
3366 memset(&rsp, 0, sizeof(rsp));
3368 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3369 if (err)
3370 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3371 else
3372 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3374 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3375 sizeof(rsp), &rsp);
3377 if (!err)
3378 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3380 return 0;
3383 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3384 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3386 int err = 0;
3388 switch (cmd->code) {
3389 case L2CAP_COMMAND_REJ:
3390 l2cap_command_rej(conn, cmd, data);
3391 break;
3393 case L2CAP_CONN_REQ:
3394 err = l2cap_connect_req(conn, cmd, data);
3395 break;
3397 case L2CAP_CONN_RSP:
3398 err = l2cap_connect_rsp(conn, cmd, data);
3399 break;
3401 case L2CAP_CONF_REQ:
3402 err = l2cap_config_req(conn, cmd, cmd_len, data);
3403 break;
3405 case L2CAP_CONF_RSP:
3406 err = l2cap_config_rsp(conn, cmd, data);
3407 break;
3409 case L2CAP_DISCONN_REQ:
3410 err = l2cap_disconnect_req(conn, cmd, data);
3411 break;
3413 case L2CAP_DISCONN_RSP:
3414 err = l2cap_disconnect_rsp(conn, cmd, data);
3415 break;
3417 case L2CAP_ECHO_REQ:
3418 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3419 break;
3421 case L2CAP_ECHO_RSP:
3422 break;
3424 case L2CAP_INFO_REQ:
3425 err = l2cap_information_req(conn, cmd, data);
3426 break;
3428 case L2CAP_INFO_RSP:
3429 err = l2cap_information_rsp(conn, cmd, data);
3430 break;
3432 case L2CAP_CREATE_CHAN_REQ:
3433 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3434 break;
3436 case L2CAP_CREATE_CHAN_RSP:
3437 err = l2cap_create_channel_rsp(conn, cmd, data);
3438 break;
3440 case L2CAP_MOVE_CHAN_REQ:
3441 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3442 break;
3444 case L2CAP_MOVE_CHAN_RSP:
3445 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3446 break;
3448 case L2CAP_MOVE_CHAN_CFM:
3449 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3450 break;
3452 case L2CAP_MOVE_CHAN_CFM_RSP:
3453 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3454 break;
3456 default:
3457 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3458 err = -EINVAL;
3459 break;
3462 return err;
3465 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3466 struct l2cap_cmd_hdr *cmd, u8 *data)
3468 switch (cmd->code) {
3469 case L2CAP_COMMAND_REJ:
3470 return 0;
3472 case L2CAP_CONN_PARAM_UPDATE_REQ:
3473 return l2cap_conn_param_update_req(conn, cmd, data);
3475 case L2CAP_CONN_PARAM_UPDATE_RSP:
3476 return 0;
3478 default:
3479 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3480 return -EINVAL;
3484 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3485 struct sk_buff *skb)
3487 u8 *data = skb->data;
3488 int len = skb->len;
3489 struct l2cap_cmd_hdr cmd;
3490 int err;
3492 l2cap_raw_recv(conn, skb);
3494 while (len >= L2CAP_CMD_HDR_SIZE) {
3495 u16 cmd_len;
3496 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3497 data += L2CAP_CMD_HDR_SIZE;
3498 len -= L2CAP_CMD_HDR_SIZE;
3500 cmd_len = le16_to_cpu(cmd.len);
3502 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3504 if (cmd_len > len || !cmd.ident) {
3505 BT_DBG("corrupted command");
3506 break;
3509 if (conn->hcon->type == LE_LINK)
3510 err = l2cap_le_sig_cmd(conn, &cmd, data);
3511 else
3512 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3514 if (err) {
3515 struct l2cap_cmd_rej_unk rej;
3517 BT_ERR("Wrong link type (%d)", err);
3519 /* FIXME: Map err to a valid reason */
3520 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3521 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3524 data += cmd_len;
3525 len -= cmd_len;
3528 kfree_skb(skb);
3531 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3533 u16 our_fcs, rcv_fcs;
3534 int hdr_size;
3536 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3537 hdr_size = L2CAP_EXT_HDR_SIZE;
3538 else
3539 hdr_size = L2CAP_ENH_HDR_SIZE;
3541 if (chan->fcs == L2CAP_FCS_CRC16) {
3542 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3543 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3544 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3546 if (our_fcs != rcv_fcs)
3547 return -EBADMSG;
3549 return 0;
3552 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3554 u32 control = 0;
3556 chan->frames_sent = 0;
3558 control |= __set_reqseq(chan, chan->buffer_seq);
3560 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3561 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3562 l2cap_send_sframe(chan, control);
3563 set_bit(CONN_RNR_SENT, &chan->conn_state);
3566 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3567 l2cap_retransmit_frames(chan);
3569 l2cap_ertm_send(chan);
3571 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3572 chan->frames_sent == 0) {
3573 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3574 l2cap_send_sframe(chan, control);
3578 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3580 struct sk_buff *next_skb;
3581 int tx_seq_offset, next_tx_seq_offset;
3583 bt_cb(skb)->tx_seq = tx_seq;
3584 bt_cb(skb)->sar = sar;
3586 next_skb = skb_peek(&chan->srej_q);
3588 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3590 while (next_skb) {
3591 if (bt_cb(next_skb)->tx_seq == tx_seq)
3592 return -EINVAL;
3594 next_tx_seq_offset = __seq_offset(chan,
3595 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3597 if (next_tx_seq_offset > tx_seq_offset) {
3598 __skb_queue_before(&chan->srej_q, next_skb, skb);
3599 return 0;
3602 if (skb_queue_is_last(&chan->srej_q, next_skb))
3603 next_skb = NULL;
3604 else
3605 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3608 __skb_queue_tail(&chan->srej_q, skb);
3610 return 0;
3613 static void append_skb_frag(struct sk_buff *skb,
3614 struct sk_buff *new_frag, struct sk_buff **last_frag)
3616 /* skb->len reflects data in skb as well as all fragments
3617 * skb->data_len reflects only data in fragments
3619 if (!skb_has_frag_list(skb))
3620 skb_shinfo(skb)->frag_list = new_frag;
3622 new_frag->next = NULL;
3624 (*last_frag)->next = new_frag;
3625 *last_frag = new_frag;
3627 skb->len += new_frag->len;
3628 skb->data_len += new_frag->len;
3629 skb->truesize += new_frag->truesize;
3632 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3634 int err = -EINVAL;
3636 switch (__get_ctrl_sar(chan, control)) {
3637 case L2CAP_SAR_UNSEGMENTED:
3638 if (chan->sdu)
3639 break;
3641 err = chan->ops->recv(chan->data, skb);
3642 break;
3644 case L2CAP_SAR_START:
3645 if (chan->sdu)
3646 break;
3648 chan->sdu_len = get_unaligned_le16(skb->data);
3649 skb_pull(skb, L2CAP_SDULEN_SIZE);
3651 if (chan->sdu_len > chan->imtu) {
3652 err = -EMSGSIZE;
3653 break;
3656 if (skb->len >= chan->sdu_len)
3657 break;
3659 chan->sdu = skb;
3660 chan->sdu_last_frag = skb;
3662 skb = NULL;
3663 err = 0;
3664 break;
3666 case L2CAP_SAR_CONTINUE:
3667 if (!chan->sdu)
3668 break;
3670 append_skb_frag(chan->sdu, skb,
3671 &chan->sdu_last_frag);
3672 skb = NULL;
3674 if (chan->sdu->len >= chan->sdu_len)
3675 break;
3677 err = 0;
3678 break;
3680 case L2CAP_SAR_END:
3681 if (!chan->sdu)
3682 break;
3684 append_skb_frag(chan->sdu, skb,
3685 &chan->sdu_last_frag);
3686 skb = NULL;
3688 if (chan->sdu->len != chan->sdu_len)
3689 break;
3691 err = chan->ops->recv(chan->data, chan->sdu);
3693 if (!err) {
3694 /* Reassembly complete */
3695 chan->sdu = NULL;
3696 chan->sdu_last_frag = NULL;
3697 chan->sdu_len = 0;
3699 break;
3702 if (err) {
3703 kfree_skb(skb);
3704 kfree_skb(chan->sdu);
3705 chan->sdu = NULL;
3706 chan->sdu_last_frag = NULL;
3707 chan->sdu_len = 0;
3710 return err;
3713 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3715 u32 control;
3717 BT_DBG("chan %p, Enter local busy", chan);
3719 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3721 control = __set_reqseq(chan, chan->buffer_seq);
3722 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3723 l2cap_send_sframe(chan, control);
3725 set_bit(CONN_RNR_SENT, &chan->conn_state);
3727 __clear_ack_timer(chan);
3730 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3732 u32 control;
3734 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3735 goto done;
3737 control = __set_reqseq(chan, chan->buffer_seq);
3738 control |= __set_ctrl_poll(chan);
3739 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3740 l2cap_send_sframe(chan, control);
3741 chan->retry_count = 1;
3743 __clear_retrans_timer(chan);
3744 __set_monitor_timer(chan);
3746 set_bit(CONN_WAIT_F, &chan->conn_state);
3748 done:
3749 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3750 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3752 BT_DBG("chan %p, Exit local busy", chan);
3755 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3757 if (chan->mode == L2CAP_MODE_ERTM) {
3758 if (busy)
3759 l2cap_ertm_enter_local_busy(chan);
3760 else
3761 l2cap_ertm_exit_local_busy(chan);
3765 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3767 struct sk_buff *skb;
3768 u32 control;
3770 while ((skb = skb_peek(&chan->srej_q)) &&
3771 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3772 int err;
3774 if (bt_cb(skb)->tx_seq != tx_seq)
3775 break;
3777 skb = skb_dequeue(&chan->srej_q);
3778 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3779 err = l2cap_reassemble_sdu(chan, skb, control);
3781 if (err < 0) {
3782 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3783 break;
3786 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3787 tx_seq = __next_seq(chan, tx_seq);
3791 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3793 struct srej_list *l, *tmp;
3794 u32 control;
3796 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3797 if (l->tx_seq == tx_seq) {
3798 list_del(&l->list);
3799 kfree(l);
3800 return;
3802 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3803 control |= __set_reqseq(chan, l->tx_seq);
3804 l2cap_send_sframe(chan, control);
3805 list_del(&l->list);
3806 list_add_tail(&l->list, &chan->srej_l);
3810 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3812 struct srej_list *new;
3813 u32 control;
3815 while (tx_seq != chan->expected_tx_seq) {
3816 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3817 control |= __set_reqseq(chan, chan->expected_tx_seq);
3818 l2cap_send_sframe(chan, control);
3820 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3821 if (!new)
3822 return -ENOMEM;
3824 new->tx_seq = chan->expected_tx_seq;
3826 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3828 list_add_tail(&new->list, &chan->srej_l);
3831 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3833 return 0;
3836 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3838 u16 tx_seq = __get_txseq(chan, rx_control);
3839 u16 req_seq = __get_reqseq(chan, rx_control);
3840 u8 sar = __get_ctrl_sar(chan, rx_control);
3841 int tx_seq_offset, expected_tx_seq_offset;
3842 int num_to_ack = (chan->tx_win/6) + 1;
3843 int err = 0;
3845 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3846 tx_seq, rx_control);
3848 if (__is_ctrl_final(chan, rx_control) &&
3849 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3850 __clear_monitor_timer(chan);
3851 if (chan->unacked_frames > 0)
3852 __set_retrans_timer(chan);
3853 clear_bit(CONN_WAIT_F, &chan->conn_state);
3856 chan->expected_ack_seq = req_seq;
3857 l2cap_drop_acked_frames(chan);
3859 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3861 /* invalid tx_seq */
3862 if (tx_seq_offset >= chan->tx_win) {
3863 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3864 goto drop;
3867 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3868 goto drop;
3870 if (tx_seq == chan->expected_tx_seq)
3871 goto expected;
3873 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3874 struct srej_list *first;
3876 first = list_first_entry(&chan->srej_l,
3877 struct srej_list, list);
3878 if (tx_seq == first->tx_seq) {
3879 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3880 l2cap_check_srej_gap(chan, tx_seq);
3882 list_del(&first->list);
3883 kfree(first);
3885 if (list_empty(&chan->srej_l)) {
3886 chan->buffer_seq = chan->buffer_seq_srej;
3887 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3888 l2cap_send_ack(chan);
3889 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3891 } else {
3892 struct srej_list *l;
3894 /* duplicated tx_seq */
3895 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3896 goto drop;
3898 list_for_each_entry(l, &chan->srej_l, list) {
3899 if (l->tx_seq == tx_seq) {
3900 l2cap_resend_srejframe(chan, tx_seq);
3901 return 0;
3905 err = l2cap_send_srejframe(chan, tx_seq);
3906 if (err < 0) {
3907 l2cap_send_disconn_req(chan->conn, chan, -err);
3908 return err;
3911 } else {
3912 expected_tx_seq_offset = __seq_offset(chan,
3913 chan->expected_tx_seq, chan->buffer_seq);
3915 /* duplicated tx_seq */
3916 if (tx_seq_offset < expected_tx_seq_offset)
3917 goto drop;
3919 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3921 BT_DBG("chan %p, Enter SREJ", chan);
3923 INIT_LIST_HEAD(&chan->srej_l);
3924 chan->buffer_seq_srej = chan->buffer_seq;
3926 __skb_queue_head_init(&chan->srej_q);
3927 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3929 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3931 err = l2cap_send_srejframe(chan, tx_seq);
3932 if (err < 0) {
3933 l2cap_send_disconn_req(chan->conn, chan, -err);
3934 return err;
3937 __clear_ack_timer(chan);
3939 return 0;
3941 expected:
3942 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3944 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3945 bt_cb(skb)->tx_seq = tx_seq;
3946 bt_cb(skb)->sar = sar;
3947 __skb_queue_tail(&chan->srej_q, skb);
3948 return 0;
3951 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3952 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3954 if (err < 0) {
3955 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3956 return err;
3959 if (__is_ctrl_final(chan, rx_control)) {
3960 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3961 l2cap_retransmit_frames(chan);
3965 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3966 if (chan->num_acked == num_to_ack - 1)
3967 l2cap_send_ack(chan);
3968 else
3969 __set_ack_timer(chan);
3971 return 0;
3973 drop:
3974 kfree_skb(skb);
3975 return 0;
3978 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3980 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3981 __get_reqseq(chan, rx_control), rx_control);
3983 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3984 l2cap_drop_acked_frames(chan);
3986 if (__is_ctrl_poll(chan, rx_control)) {
3987 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3988 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3989 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3990 (chan->unacked_frames > 0))
3991 __set_retrans_timer(chan);
3993 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3994 l2cap_send_srejtail(chan);
3995 } else {
3996 l2cap_send_i_or_rr_or_rnr(chan);
3999 } else if (__is_ctrl_final(chan, rx_control)) {
4000 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4002 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4003 l2cap_retransmit_frames(chan);
4005 } else {
4006 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4007 (chan->unacked_frames > 0))
4008 __set_retrans_timer(chan);
4010 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4011 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4012 l2cap_send_ack(chan);
4013 else
4014 l2cap_ertm_send(chan);
4018 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4020 u16 tx_seq = __get_reqseq(chan, rx_control);
4022 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4024 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4026 chan->expected_ack_seq = tx_seq;
4027 l2cap_drop_acked_frames(chan);
4029 if (__is_ctrl_final(chan, rx_control)) {
4030 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4031 l2cap_retransmit_frames(chan);
4032 } else {
4033 l2cap_retransmit_frames(chan);
4035 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4036 set_bit(CONN_REJ_ACT, &chan->conn_state);
4039 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4041 u16 tx_seq = __get_reqseq(chan, rx_control);
4043 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4045 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4047 if (__is_ctrl_poll(chan, rx_control)) {
4048 chan->expected_ack_seq = tx_seq;
4049 l2cap_drop_acked_frames(chan);
4051 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4052 l2cap_retransmit_one_frame(chan, tx_seq);
4054 l2cap_ertm_send(chan);
4056 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4057 chan->srej_save_reqseq = tx_seq;
4058 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4060 } else if (__is_ctrl_final(chan, rx_control)) {
4061 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4062 chan->srej_save_reqseq == tx_seq)
4063 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4064 else
4065 l2cap_retransmit_one_frame(chan, tx_seq);
4066 } else {
4067 l2cap_retransmit_one_frame(chan, tx_seq);
4068 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4069 chan->srej_save_reqseq = tx_seq;
4070 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4075 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4077 u16 tx_seq = __get_reqseq(chan, rx_control);
4079 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4081 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4082 chan->expected_ack_seq = tx_seq;
4083 l2cap_drop_acked_frames(chan);
4085 if (__is_ctrl_poll(chan, rx_control))
4086 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4088 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4089 __clear_retrans_timer(chan);
4090 if (__is_ctrl_poll(chan, rx_control))
4091 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4092 return;
4095 if (__is_ctrl_poll(chan, rx_control)) {
4096 l2cap_send_srejtail(chan);
4097 } else {
4098 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4099 l2cap_send_sframe(chan, rx_control);
4103 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4105 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4107 if (__is_ctrl_final(chan, rx_control) &&
4108 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4109 __clear_monitor_timer(chan);
4110 if (chan->unacked_frames > 0)
4111 __set_retrans_timer(chan);
4112 clear_bit(CONN_WAIT_F, &chan->conn_state);
4115 switch (__get_ctrl_super(chan, rx_control)) {
4116 case L2CAP_SUPER_RR:
4117 l2cap_data_channel_rrframe(chan, rx_control);
4118 break;
4120 case L2CAP_SUPER_REJ:
4121 l2cap_data_channel_rejframe(chan, rx_control);
4122 break;
4124 case L2CAP_SUPER_SREJ:
4125 l2cap_data_channel_srejframe(chan, rx_control);
4126 break;
4128 case L2CAP_SUPER_RNR:
4129 l2cap_data_channel_rnrframe(chan, rx_control);
4130 break;
4133 kfree_skb(skb);
4134 return 0;
4137 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4139 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4140 u32 control;
4141 u16 req_seq;
4142 int len, next_tx_seq_offset, req_seq_offset;
4144 control = __get_control(chan, skb->data);
4145 skb_pull(skb, __ctrl_size(chan));
4146 len = skb->len;
4149 * We can just drop the corrupted I-frame here.
4150 * Receiver will miss it and start proper recovery
4151 * procedures and ask retransmission.
4153 if (l2cap_check_fcs(chan, skb))
4154 goto drop;
4156 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4157 len -= L2CAP_SDULEN_SIZE;
4159 if (chan->fcs == L2CAP_FCS_CRC16)
4160 len -= L2CAP_FCS_SIZE;
4162 if (len > chan->mps) {
4163 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4164 goto drop;
4167 req_seq = __get_reqseq(chan, control);
4169 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4171 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4172 chan->expected_ack_seq);
4174 /* check for invalid req-seq */
4175 if (req_seq_offset > next_tx_seq_offset) {
4176 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4177 goto drop;
4180 if (!__is_sframe(chan, control)) {
4181 if (len < 0) {
4182 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4183 goto drop;
4186 l2cap_data_channel_iframe(chan, control, skb);
4187 } else {
4188 if (len != 0) {
4189 BT_ERR("%d", len);
4190 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4191 goto drop;
4194 l2cap_data_channel_sframe(chan, control, skb);
4197 return 0;
4199 drop:
4200 kfree_skb(skb);
4201 return 0;
4204 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4206 struct l2cap_chan *chan;
4207 struct sock *sk = NULL;
4208 u32 control;
4209 u16 tx_seq;
4210 int len;
4212 chan = l2cap_get_chan_by_scid(conn, cid);
4213 if (!chan) {
4214 BT_DBG("unknown cid 0x%4.4x", cid);
4215 goto drop;
4218 sk = chan->sk;
4220 BT_DBG("chan %p, len %d", chan, skb->len);
4222 if (chan->state != BT_CONNECTED)
4223 goto drop;
4225 switch (chan->mode) {
4226 case L2CAP_MODE_BASIC:
4227 /* If socket recv buffers overflows we drop data here
4228 * which is *bad* because L2CAP has to be reliable.
4229 * But we don't have any other choice. L2CAP doesn't
4230 * provide flow control mechanism. */
4232 if (chan->imtu < skb->len)
4233 goto drop;
4235 if (!chan->ops->recv(chan->data, skb))
4236 goto done;
4237 break;
4239 case L2CAP_MODE_ERTM:
4240 l2cap_ertm_data_rcv(sk, skb);
4242 goto done;
4244 case L2CAP_MODE_STREAMING:
4245 control = __get_control(chan, skb->data);
4246 skb_pull(skb, __ctrl_size(chan));
4247 len = skb->len;
4249 if (l2cap_check_fcs(chan, skb))
4250 goto drop;
4252 if (__is_sar_start(chan, control))
4253 len -= L2CAP_SDULEN_SIZE;
4255 if (chan->fcs == L2CAP_FCS_CRC16)
4256 len -= L2CAP_FCS_SIZE;
4258 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4259 goto drop;
4261 tx_seq = __get_txseq(chan, control);
4263 if (chan->expected_tx_seq != tx_seq) {
4264 /* Frame(s) missing - must discard partial SDU */
4265 kfree_skb(chan->sdu);
4266 chan->sdu = NULL;
4267 chan->sdu_last_frag = NULL;
4268 chan->sdu_len = 0;
4270 /* TODO: Notify userland of missing data */
4273 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4275 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4276 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4278 goto done;
4280 default:
4281 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4282 break;
4285 drop:
4286 kfree_skb(skb);
4288 done:
4289 if (sk)
4290 release_sock(sk);
4292 return 0;
4295 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4297 struct sock *sk = NULL;
4298 struct l2cap_chan *chan;
4300 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4301 if (!chan)
4302 goto drop;
4304 sk = chan->sk;
4306 lock_sock(sk);
4308 BT_DBG("sk %p, len %d", sk, skb->len);
4310 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4311 goto drop;
4313 if (chan->imtu < skb->len)
4314 goto drop;
4316 if (!chan->ops->recv(chan->data, skb))
4317 goto done;
4319 drop:
4320 kfree_skb(skb);
4322 done:
4323 if (sk)
4324 release_sock(sk);
4325 return 0;
4328 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4330 struct sock *sk = NULL;
4331 struct l2cap_chan *chan;
4333 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4334 if (!chan)
4335 goto drop;
4337 sk = chan->sk;
4339 lock_sock(sk);
4341 BT_DBG("sk %p, len %d", sk, skb->len);
4343 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4344 goto drop;
4346 if (chan->imtu < skb->len)
4347 goto drop;
4349 if (!chan->ops->recv(chan->data, skb))
4350 goto done;
4352 drop:
4353 kfree_skb(skb);
4355 done:
4356 if (sk)
4357 release_sock(sk);
4358 return 0;
4361 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4363 struct l2cap_hdr *lh = (void *) skb->data;
4364 u16 cid, len;
4365 __le16 psm;
4367 skb_pull(skb, L2CAP_HDR_SIZE);
4368 cid = __le16_to_cpu(lh->cid);
4369 len = __le16_to_cpu(lh->len);
4371 if (len != skb->len) {
4372 kfree_skb(skb);
4373 return;
4376 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4378 switch (cid) {
4379 case L2CAP_CID_LE_SIGNALING:
4380 case L2CAP_CID_SIGNALING:
4381 l2cap_sig_channel(conn, skb);
4382 break;
4384 case L2CAP_CID_CONN_LESS:
4385 psm = get_unaligned_le16(skb->data);
4386 skb_pull(skb, 2);
4387 l2cap_conless_channel(conn, psm, skb);
4388 break;
4390 case L2CAP_CID_LE_DATA:
4391 l2cap_att_channel(conn, cid, skb);
4392 break;
4394 case L2CAP_CID_SMP:
4395 if (smp_sig_channel(conn, skb))
4396 l2cap_conn_del(conn->hcon, EACCES);
4397 break;
4399 default:
4400 l2cap_data_channel(conn, cid, skb);
4401 break;
4405 /* ---- L2CAP interface with lower layer (HCI) ---- */
4407 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4409 int exact = 0, lm1 = 0, lm2 = 0;
4410 struct l2cap_chan *c;
4412 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4414 /* Find listening sockets and check their link_mode */
4415 read_lock(&chan_list_lock);
4416 list_for_each_entry(c, &chan_list, global_l) {
4417 struct sock *sk = c->sk;
4419 if (c->state != BT_LISTEN)
4420 continue;
4422 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4423 lm1 |= HCI_LM_ACCEPT;
4424 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4425 lm1 |= HCI_LM_MASTER;
4426 exact++;
4427 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4428 lm2 |= HCI_LM_ACCEPT;
4429 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4430 lm2 |= HCI_LM_MASTER;
4433 read_unlock(&chan_list_lock);
4435 return exact ? lm1 : lm2;
4438 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4440 struct l2cap_conn *conn;
4442 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4444 if (!status) {
4445 conn = l2cap_conn_add(hcon, status);
4446 if (conn)
4447 l2cap_conn_ready(conn);
4448 } else
4449 l2cap_conn_del(hcon, bt_to_errno(status));
4451 return 0;
4454 int l2cap_disconn_ind(struct hci_conn *hcon)
4456 struct l2cap_conn *conn = hcon->l2cap_data;
4458 BT_DBG("hcon %p", hcon);
4460 if (!conn)
4461 return HCI_ERROR_REMOTE_USER_TERM;
4462 return conn->disc_reason;
4465 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4467 BT_DBG("hcon %p reason %d", hcon, reason);
4469 l2cap_conn_del(hcon, bt_to_errno(reason));
4470 return 0;
4473 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4475 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4476 return;
4478 if (encrypt == 0x00) {
4479 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4480 __clear_chan_timer(chan);
4481 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4482 } else if (chan->sec_level == BT_SECURITY_HIGH)
4483 l2cap_chan_close(chan, ECONNREFUSED);
4484 } else {
4485 if (chan->sec_level == BT_SECURITY_MEDIUM)
4486 __clear_chan_timer(chan);
4490 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4492 struct l2cap_conn *conn = hcon->l2cap_data;
4493 struct l2cap_chan *chan;
4495 if (!conn)
4496 return 0;
4498 BT_DBG("conn %p", conn);
4500 if (hcon->type == LE_LINK) {
4501 smp_distribute_keys(conn, 0);
4502 __cancel_delayed_work(&conn->security_timer);
4505 rcu_read_lock();
4507 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4508 struct sock *sk = chan->sk;
4510 bh_lock_sock(sk);
4512 BT_DBG("chan->scid %d", chan->scid);
4514 if (chan->scid == L2CAP_CID_LE_DATA) {
4515 if (!status && encrypt) {
4516 chan->sec_level = hcon->sec_level;
4517 l2cap_chan_ready(sk);
4520 bh_unlock_sock(sk);
4521 continue;
4524 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4525 bh_unlock_sock(sk);
4526 continue;
4529 if (!status && (chan->state == BT_CONNECTED ||
4530 chan->state == BT_CONFIG)) {
4531 l2cap_check_encryption(chan, encrypt);
4532 bh_unlock_sock(sk);
4533 continue;
4536 if (chan->state == BT_CONNECT) {
4537 if (!status) {
4538 struct l2cap_conn_req req;
4539 req.scid = cpu_to_le16(chan->scid);
4540 req.psm = chan->psm;
4542 chan->ident = l2cap_get_ident(conn);
4543 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4545 l2cap_send_cmd(conn, chan->ident,
4546 L2CAP_CONN_REQ, sizeof(req), &req);
4547 } else {
4548 __clear_chan_timer(chan);
4549 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4551 } else if (chan->state == BT_CONNECT2) {
4552 struct l2cap_conn_rsp rsp;
4553 __u16 res, stat;
4555 if (!status) {
4556 if (bt_sk(sk)->defer_setup) {
4557 struct sock *parent = bt_sk(sk)->parent;
4558 res = L2CAP_CR_PEND;
4559 stat = L2CAP_CS_AUTHOR_PEND;
4560 if (parent)
4561 parent->sk_data_ready(parent, 0);
4562 } else {
4563 l2cap_state_change(chan, BT_CONFIG);
4564 res = L2CAP_CR_SUCCESS;
4565 stat = L2CAP_CS_NO_INFO;
4567 } else {
4568 l2cap_state_change(chan, BT_DISCONN);
4569 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4570 res = L2CAP_CR_SEC_BLOCK;
4571 stat = L2CAP_CS_NO_INFO;
4574 rsp.scid = cpu_to_le16(chan->dcid);
4575 rsp.dcid = cpu_to_le16(chan->scid);
4576 rsp.result = cpu_to_le16(res);
4577 rsp.status = cpu_to_le16(stat);
4578 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4579 sizeof(rsp), &rsp);
4582 bh_unlock_sock(sk);
4585 rcu_read_unlock();
4587 return 0;
4590 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4592 struct l2cap_conn *conn = hcon->l2cap_data;
4594 if (!conn)
4595 conn = l2cap_conn_add(hcon, 0);
4597 if (!conn)
4598 goto drop;
4600 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4602 if (!(flags & ACL_CONT)) {
4603 struct l2cap_hdr *hdr;
4604 struct l2cap_chan *chan;
4605 u16 cid;
4606 int len;
4608 if (conn->rx_len) {
4609 BT_ERR("Unexpected start frame (len %d)", skb->len);
4610 kfree_skb(conn->rx_skb);
4611 conn->rx_skb = NULL;
4612 conn->rx_len = 0;
4613 l2cap_conn_unreliable(conn, ECOMM);
4616 /* Start fragment always begin with Basic L2CAP header */
4617 if (skb->len < L2CAP_HDR_SIZE) {
4618 BT_ERR("Frame is too short (len %d)", skb->len);
4619 l2cap_conn_unreliable(conn, ECOMM);
4620 goto drop;
4623 hdr = (struct l2cap_hdr *) skb->data;
4624 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4625 cid = __le16_to_cpu(hdr->cid);
4627 if (len == skb->len) {
4628 /* Complete frame received */
4629 l2cap_recv_frame(conn, skb);
4630 return 0;
4633 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4635 if (skb->len > len) {
4636 BT_ERR("Frame is too long (len %d, expected len %d)",
4637 skb->len, len);
4638 l2cap_conn_unreliable(conn, ECOMM);
4639 goto drop;
4642 chan = l2cap_get_chan_by_scid(conn, cid);
4644 if (chan && chan->sk) {
4645 struct sock *sk = chan->sk;
4647 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4648 BT_ERR("Frame exceeding recv MTU (len %d, "
4649 "MTU %d)", len,
4650 chan->imtu);
4651 release_sock(sk);
4652 l2cap_conn_unreliable(conn, ECOMM);
4653 goto drop;
4655 release_sock(sk);
4658 /* Allocate skb for the complete frame (with header) */
4659 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4660 if (!conn->rx_skb)
4661 goto drop;
4663 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4664 skb->len);
4665 conn->rx_len = len - skb->len;
4666 } else {
4667 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4669 if (!conn->rx_len) {
4670 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4671 l2cap_conn_unreliable(conn, ECOMM);
4672 goto drop;
4675 if (skb->len > conn->rx_len) {
4676 BT_ERR("Fragment is too long (len %d, expected %d)",
4677 skb->len, conn->rx_len);
4678 kfree_skb(conn->rx_skb);
4679 conn->rx_skb = NULL;
4680 conn->rx_len = 0;
4681 l2cap_conn_unreliable(conn, ECOMM);
4682 goto drop;
4685 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4686 skb->len);
4687 conn->rx_len -= skb->len;
4689 if (!conn->rx_len) {
4690 /* Complete frame received */
4691 l2cap_recv_frame(conn, conn->rx_skb);
4692 conn->rx_skb = NULL;
4696 drop:
4697 kfree_skb(skb);
4698 return 0;
4701 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4703 struct l2cap_chan *c;
4705 read_lock(&chan_list_lock);
4707 list_for_each_entry(c, &chan_list, global_l) {
4708 struct sock *sk = c->sk;
4710 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4711 batostr(&bt_sk(sk)->src),
4712 batostr(&bt_sk(sk)->dst),
4713 c->state, __le16_to_cpu(c->psm),
4714 c->scid, c->dcid, c->imtu, c->omtu,
4715 c->sec_level, c->mode);
4718 read_unlock(&chan_list_lock);
4720 return 0;
4723 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4725 return single_open(file, l2cap_debugfs_show, inode->i_private);
4728 static const struct file_operations l2cap_debugfs_fops = {
4729 .open = l2cap_debugfs_open,
4730 .read = seq_read,
4731 .llseek = seq_lseek,
4732 .release = single_release,
4735 static struct dentry *l2cap_debugfs;
4737 int __init l2cap_init(void)
4739 int err;
4741 err = l2cap_init_sockets();
4742 if (err < 0)
4743 return err;
4745 if (bt_debugfs) {
4746 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4747 bt_debugfs, NULL, &l2cap_debugfs_fops);
4748 if (!l2cap_debugfs)
4749 BT_ERR("Failed to create L2CAP debug file");
4752 return 0;
4755 void l2cap_exit(void)
4757 debugfs_remove(l2cap_debugfs);
4758 l2cap_cleanup_sockets();
4761 module_param(disable_ertm, bool, 0644);
4762 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");