net/bluetooth/l2cap_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
bool disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
				void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c, *r = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &conn->chan_l, list) {
		if (c->dcid == cid) {
			r = c;
			break;
		}
	}

	rcu_read_unlock();
	return r;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c, *r = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &conn->chan_l, list) {
		if (c->scid == cid) {
			r = c;
			break;
		}
	}

	rcu_read_unlock();
	return r;
}

/* Find channel with given SCID.
 * Returns locked socket */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		lock_sock(c->sk);
	return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c, *r = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &conn->chan_l, list) {
		if (c->ident == ident) {
			r = c;
			break;
		}
	}

	rcu_read_unlock();
	return r;
}

static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		lock_sock(c->sk);
	return c;
}
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid = L2CAP_CID_DYN_START;

	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
static char *state_to_string(int state)
{
	switch(state) {
	case BT_CONNECTED:
		return "BT_CONNECTED";
	case BT_OPEN:
		return "BT_OPEN";
	case BT_BOUND:
		return "BT_BOUND";
	case BT_LISTEN:
		return "BT_LISTEN";
	case BT_CONNECT:
		return "BT_CONNECT";
	case BT_CONNECT2:
		return "BT_CONNECT2";
	case BT_CONFIG:
		return "BT_CONFIG";
	case BT_DISCONN:
		return "BT_DISCONN";
	case BT_CLOSED:
		return "BT_CLOSED";
	}

	return "invalid state";
}

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
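
/* Channel timer expired: pick a disconnect reason from the channel's
 * current state, close the channel and drop the channel reference.
 */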
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	lock_sock(sk);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	release_sock(sk);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);
}
struct l2cap_chan *l2cap_chan_create(struct sock *sk)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	chan->sk = sk;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	BT_DBG("sk %p chan %p", sk, chan);

	return chan;
}

void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
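
/* Attach a channel to a connection: pick SCID/DCID and default MTU based on
 * the channel type and link type, then add it to the connection's channel
 * list under an extra reference.
 */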
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			chan->psm, chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	list_add_rcu(&chan->list, &conn->chan_l);
}
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del_rcu(&chan->list);
		synchronize_rcu();

		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
		__clear_chan_timer(chan);
		lock_sock(sk);
		l2cap_chan_close(chan, ECONNRESET);
		release_sock(sk);
		chan->ops->close(chan->data);
	}
}

void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);

	switch (chan->state) {
	case BT_LISTEN:
		l2cap_chan_cleanup_listen(sk);

		l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__clear_chan_timer(chan);
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
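
/* Map the channel type, PSM and requested security level to the HCI
 * authentication requirement used when securing the underlying link.
 */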
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == cpu_to_le16(0x0001)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}

static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
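
/* Build and transmit an S-frame (supervisory frame) carrying the given
 * control field, adding the final/poll bits and FCS as the channel state
 * requires.
 */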
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen;

	if (chan->state != BT_CONNECTED)
		return;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	BT_DBG("chan %p, control 0x%8.8x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);

	control |= __set_sframe(chan);

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= __set_ctrl_final(chan);

	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= __set_ctrl_poll(chan);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	l2cap_do_send(chan, skb);
}

static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
{
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	} else
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);

	control |= __set_reqseq(chan, chan->buffer_seq);

	l2cap_send_sframe(chan, control);
}

static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
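
/* Start channel establishment: send a Connection Request once the remote
 * feature mask is known and security is satisfied, otherwise kick off an
 * Information Request to fetch the feature mask first.
 */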
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer,
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}

static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	l2cap_state_change(chan, BT_DISCONN);
	sk->sk_err = err;
}

/* ---- L2CAP connections ---- */
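
/* Walk all channels on a connection and push their setup forward: send
 * pending Connection Requests for channels in BT_CONNECT and answer
 * incoming requests for channels sitting in BT_CONNECT2.
 */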
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	rcu_read_lock();

	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			struct l2cap_conn_req req;

			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				bh_unlock_sock(sk);
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				/* l2cap_chan_close() calls list_del(chan)
				 * so release the lock */
				l2cap_chan_close(chan, ECONNRESET);
				bh_unlock_sock(sk);
				continue;
			}

			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		bh_unlock_sock(sk);
	}

	rcu_read_unlock();
}
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src)) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

clean:
	release_sock(parent);
}

static void l2cap_chan_ready(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	chan->conf_state = 0;
	__clear_chan_timer(chan);

	l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	rcu_read_lock();

	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(sk);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	rcu_read_unlock();
}

/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	rcu_read_lock();

	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			sk->sk_err = err;
	}

	rcu_read_unlock();
}

static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
							info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		lock_sock(sk);
		l2cap_chan_del(chan, err);
		release_sock(sk);
		chan->ops->close(chan->data);
	}

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}

static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
						security_timer.work);

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}

static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);

	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
/* ---- Socket interface ---- */

/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src)) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	lock_sock(sk);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
					chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);
	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}

static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							monitor_timer.work);
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		release_sock(sk);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	release_sock(sk);
}

static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							retrans_timer.work);
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	lock_sock(sk);
	chan->retry_count = 1;
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	release_sock(sk);
}
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&chan->tx_q)) &&
			chan->unacked_frames) {
		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
			break;

		skb = skb_dequeue(&chan->tx_q);
		kfree_skb(skb);

		chan->unacked_frames--;
	}

	if (!chan->unacked_frames)
		__clear_retrans_timer(chan);
}

static void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u32 control;
	u16 fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
		control |= __set_txseq(chan, chan->next_tx_seq);
		__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data,
						skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs,
					skb->data + skb->len - L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
	}
}

static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
{
	struct sk_buff *skb, *tx_skb;
	u16 fcs;
	u32 control;

	skb = skb_peek(&chan->tx_q);
	if (!skb)
		return;

	while (bt_cb(skb)->tx_seq != tx_seq) {
		if (skb_queue_is_last(&chan->tx_q, skb))
			return;

		skb = skb_queue_next(&chan->tx_q, skb);
	}

	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		return;
	}

	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;

	control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
	control &= __get_sar_mask(chan);

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= __set_ctrl_final(chan);

	control |= __set_reqseq(chan, chan->buffer_seq);
	control |= __set_txseq(chan, tx_seq);

	__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);

	if (chan->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data,
						tx_skb->len - L2CAP_FCS_SIZE);
		put_unaligned_le16(fcs,
				tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
	}

	l2cap_do_send(chan, tx_skb);
}
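
/* Transmit queued I-frames while the transmit window allows it. Each frame
 * is sent as a clone so the original stays on tx_q for retransmission until
 * it is acknowledged.
 */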
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	u16 fcs;
	u32 control;
	int nsent = 0;

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {

		if (chan->remote_max_tx &&
				bt_cb(skb)->retries == chan->remote_max_tx) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
			break;
		}

		tx_skb = skb_clone(skb, GFP_ATOMIC);

		bt_cb(skb)->retries++;

		control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
		control &= __get_sar_mask(chan);

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control |= __set_ctrl_final(chan);

		control |= __set_reqseq(chan, chan->buffer_seq);
		control |= __set_txseq(chan, chan->next_tx_seq);

		__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data,
						tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb->data +
						tx_skb->len - L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		__set_retrans_timer(chan);

		bt_cb(skb)->tx_seq = chan->next_tx_seq;

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);

		if (bt_cb(skb)->retries == 1)
			chan->unacked_frames++;

		chan->frames_sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		nsent++;
	}

	return nsent;
}

static int l2cap_retransmit_frames(struct l2cap_chan *chan)
{
	int ret;

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = chan->tx_q.next;

	chan->next_tx_seq = chan->expected_ack_seq;
	ret = l2cap_ertm_send(chan);
	return ret;
}

static void l2cap_send_ack(struct l2cap_chan *chan)
{
	u32 control = 0;

	control |= __set_reqseq(chan, chan->buffer_seq);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);
		return;
	}

	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
}

static void l2cap_send_srejtail(struct l2cap_chan *chan)
{
	struct srej_list *tail;
	u32 control;

	control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
	control |= __set_ctrl_final(chan);

	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
	control |= __set_reqseq(chan, tail->tx_seq);

	l2cap_send_sframe(chan, control);
}
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len,
						u32 priority)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len,
						u32 priority)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}

static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len,
						u32 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));

	bt_cb(skb)->retries = 0;
	return skb;
}
static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u32 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = __set_ctrl_sar(chan, L2CAP_SAR_START);
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
			buflen = chan->remote_mps;
		} else {
			control = __set_ctrl_sar(chan, L2CAP_SAR_END);
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
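
/* Main transmit entry point: connectionless channels send a single PDU,
 * basic mode sends one PDU per SDU, and ERTM/streaming modes segment the
 * SDU as needed before queueing it for transmission.
 */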
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
								u32 priority)
{
	struct sk_buff *skb;
	u32 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiple PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				test_bit(CONN_WAIT_F, &chan->conn_state)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	rcu_read_lock();

	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}

	rcu_read_unlock();
}

/* ---- L2CAP signalling commands ---- */
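
/* Allocate and fill an skb for a signalling command, spilling any payload
 * that does not fit into the ACL MTU into continuation fragments.
 */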
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
				conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}

static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
							(unsigned long) &efs);
}
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							ack_timer.work);

	BT_DBG("chan %p", chan);

	lock_sock(chan->sk);
	l2cap_send_ack(chan);
	release_sock(chan->sk);
}

static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);
}

static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}

static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}

static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}

static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
						__l2cap_ews_supported(chan)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
						L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
}
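
/* Build our outgoing Configuration Request in 'data': advertise the MTU and,
 * depending on the negotiated mode, the RFC, FCS, EFS and extended window
 * size options. Returns the length of the request written.
 */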
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
						L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
								chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
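
/* Parse the peer's Configuration Request stored in chan->conf_req and build
 * the Configuration Response in 'data', accepting or adjusting the offered
 * options. Returns the length of the response written.
 */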
2170 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2172 struct l2cap_conf_rsp *rsp = data;
2173 void *ptr = rsp->data;
2174 void *req = chan->conf_req;
2175 int len = chan->conf_len;
2176 int type, hint, olen;
2177 unsigned long val;
2178 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2179 struct l2cap_conf_efs efs;
2180 u8 remote_efs = 0;
2181 u16 mtu = L2CAP_DEFAULT_MTU;
2182 u16 result = L2CAP_CONF_SUCCESS;
2183 u16 size;
2185 BT_DBG("chan %p", chan);
2187 while (len >= L2CAP_CONF_OPT_SIZE) {
2188 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2190 hint = type & L2CAP_CONF_HINT;
2191 type &= L2CAP_CONF_MASK;
2193 switch (type) {
2194 case L2CAP_CONF_MTU:
2195 mtu = val;
2196 break;
2198 case L2CAP_CONF_FLUSH_TO:
2199 chan->flush_to = val;
2200 break;
2202 case L2CAP_CONF_QOS:
2203 break;
2205 case L2CAP_CONF_RFC:
2206 if (olen == sizeof(rfc))
2207 memcpy(&rfc, (void *) val, olen);
2208 break;
2210 case L2CAP_CONF_FCS:
2211 if (val == L2CAP_FCS_NONE)
2212 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2213 break;
2215 case L2CAP_CONF_EFS:
2216 remote_efs = 1;
2217 if (olen == sizeof(efs))
2218 memcpy(&efs, (void *) val, olen);
2219 break;
2221 case L2CAP_CONF_EWS:
2222 if (!enable_hs)
2223 return -ECONNREFUSED;
2225 set_bit(FLAG_EXT_CTRL, &chan->flags);
2226 set_bit(CONF_EWS_RECV, &chan->conf_state);
2227 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2228 chan->remote_tx_win = val;
2229 break;
2231 default:
2232 if (hint)
2233 break;
2235 result = L2CAP_CONF_UNKNOWN;
2236 *((u8 *) ptr++) = type;
2237 break;
2241 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2242 goto done;
2244 switch (chan->mode) {
2245 case L2CAP_MODE_STREAMING:
2246 case L2CAP_MODE_ERTM:
2247 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2248 chan->mode = l2cap_select_mode(rfc.mode,
2249 chan->conn->feat_mask);
2250 break;
2253 if (remote_efs) {
2254 if (__l2cap_efs_supported(chan))
2255 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2256 else
2257 return -ECONNREFUSED;
2260 if (chan->mode != rfc.mode)
2261 return -ECONNREFUSED;
2263 break;
2266 done:
2267 if (chan->mode != rfc.mode) {
2268 result = L2CAP_CONF_UNACCEPT;
2269 rfc.mode = chan->mode;
2271 if (chan->num_conf_rsp == 1)
2272 return -ECONNREFUSED;
2274 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2275 sizeof(rfc), (unsigned long) &rfc);
2278 if (result == L2CAP_CONF_SUCCESS) {
2279 /* Configure output options and let the other side know
2280 * which ones we don't like. */
2282 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2283 result = L2CAP_CONF_UNACCEPT;
2284 else {
2285 chan->omtu = mtu;
2286 set_bit(CONF_MTU_DONE, &chan->conf_state);
2288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2290 if (remote_efs) {
2291 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2292 efs.stype != L2CAP_SERV_NOTRAFIC &&
2293 efs.stype != chan->local_stype) {
2295 result = L2CAP_CONF_UNACCEPT;
2297 if (chan->num_conf_req >= 1)
2298 return -ECONNREFUSED;
2300 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2301 sizeof(efs),
2302 (unsigned long) &efs);
2303 } else {
2304 /* Send PENDING Conf Rsp */
2305 result = L2CAP_CONF_PENDING;
2306 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2310 switch (rfc.mode) {
2311 case L2CAP_MODE_BASIC:
2312 chan->fcs = L2CAP_FCS_NONE;
2313 set_bit(CONF_MODE_DONE, &chan->conf_state);
2314 break;
2316 case L2CAP_MODE_ERTM:
2317 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2318 chan->remote_tx_win = rfc.txwin_size;
2319 else
2320 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2322 chan->remote_max_tx = rfc.max_transmit;
2324 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2325 chan->conn->mtu -
2326 L2CAP_EXT_HDR_SIZE -
2327 L2CAP_SDULEN_SIZE -
2328 L2CAP_FCS_SIZE);
2329 rfc.max_pdu_size = cpu_to_le16(size);
2330 chan->remote_mps = size;
2332 rfc.retrans_timeout =
2333 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2334 rfc.monitor_timeout =
2335 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2337 set_bit(CONF_MODE_DONE, &chan->conf_state);
2339 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2340 sizeof(rfc), (unsigned long) &rfc);
2342 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2343 chan->remote_id = efs.id;
2344 chan->remote_stype = efs.stype;
2345 chan->remote_msdu = le16_to_cpu(efs.msdu);
2346 chan->remote_flush_to =
2347 le32_to_cpu(efs.flush_to);
2348 chan->remote_acc_lat =
2349 le32_to_cpu(efs.acc_lat);
2350 chan->remote_sdu_itime =
2351 le32_to_cpu(efs.sdu_itime);
2352 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2353 sizeof(efs), (unsigned long) &efs);
2355 break;
2357 case L2CAP_MODE_STREAMING:
2358 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2359 chan->conn->mtu -
2360 L2CAP_EXT_HDR_SIZE -
2361 L2CAP_SDULEN_SIZE -
2362 L2CAP_FCS_SIZE);
2363 rfc.max_pdu_size = cpu_to_le16(size);
2364 chan->remote_mps = size;
2366 set_bit(CONF_MODE_DONE, &chan->conf_state);
2368 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2369 sizeof(rfc), (unsigned long) &rfc);
2371 break;
2373 default:
2374 result = L2CAP_CONF_UNACCEPT;
2376 memset(&rfc, 0, sizeof(rfc));
2377 rfc.mode = chan->mode;
2380 if (result == L2CAP_CONF_SUCCESS)
2381 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2383 rsp->scid = cpu_to_le16(chan->dcid);
2384 rsp->result = cpu_to_le16(result);
2385 rsp->flags = cpu_to_le16(0x0000);
2387 return ptr - data;
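/* Parse the options carried in a Configure Response and build the
 * follow-up Configure Request in 'data'.  Accepted values (MTU, flush
 * timeout, RFC, extended window, EFS) are stored in the channel and
 * echoed back; returns the request length or -ECONNREFUSED if the
 * proposed parameters cannot be accepted.
 */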
2390 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2392 struct l2cap_conf_req *req = data;
2393 void *ptr = req->data;
2394 int type, olen;
2395 unsigned long val;
2396 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2397 struct l2cap_conf_efs efs;
2399 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2401 while (len >= L2CAP_CONF_OPT_SIZE) {
2402 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2404 switch (type) {
2405 case L2CAP_CONF_MTU:
2406 if (val < L2CAP_DEFAULT_MIN_MTU) {
2407 *result = L2CAP_CONF_UNACCEPT;
2408 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2409 } else
2410 chan->imtu = val;
2411 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2412 break;
2414 case L2CAP_CONF_FLUSH_TO:
2415 chan->flush_to = val;
2416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2417 2, chan->flush_to);
2418 break;
2420 case L2CAP_CONF_RFC:
2421 if (olen == sizeof(rfc))
2422 memcpy(&rfc, (void *)val, olen);
2424 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2425 rfc.mode != chan->mode)
2426 return -ECONNREFUSED;
2428 chan->fcs = 0;
2430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2431 sizeof(rfc), (unsigned long) &rfc);
2432 break;
2434 case L2CAP_CONF_EWS:
2435 chan->tx_win = min_t(u16, val,
2436 L2CAP_DEFAULT_EXT_WINDOW);
2437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2438 chan->tx_win);
2439 break;
2441 case L2CAP_CONF_EFS:
2442 if (olen == sizeof(efs))
2443 memcpy(&efs, (void *)val, olen);
2445 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2446 efs.stype != L2CAP_SERV_NOTRAFIC &&
2447 efs.stype != chan->local_stype)
2448 return -ECONNREFUSED;
2450 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2451 sizeof(efs), (unsigned long) &efs);
2452 break;
2456 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2457 return -ECONNREFUSED;
2459 chan->mode = rfc.mode;
2461 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2462 switch (rfc.mode) {
2463 case L2CAP_MODE_ERTM:
2464 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2465 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2466 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2468 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2469 chan->local_msdu = le16_to_cpu(efs.msdu);
2470 chan->local_sdu_itime =
2471 le32_to_cpu(efs.sdu_itime);
2472 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2473 chan->local_flush_to =
2474 le32_to_cpu(efs.flush_to);
2476 break;
2478 case L2CAP_MODE_STREAMING:
2479 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2483 req->dcid = cpu_to_le16(chan->dcid);
2484 req->flags = cpu_to_le16(0x0000);
2486 return ptr - data;
2489 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2491 struct l2cap_conf_rsp *rsp = data;
2492 void *ptr = rsp->data;
2494 BT_DBG("chan %p", chan);
2496 rsp->scid = cpu_to_le16(chan->dcid);
2497 rsp->result = cpu_to_le16(result);
2498 rsp->flags = cpu_to_le16(flags);
2500 return ptr - data;
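/* Send the deferred Connect Response for a channel that was accepted
 * after authorization, then start configuration if we have not already
 * sent a Configure Request.
 */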
2503 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2505 struct l2cap_conn_rsp rsp;
2506 struct l2cap_conn *conn = chan->conn;
2507 u8 buf[128];
2509 rsp.scid = cpu_to_le16(chan->dcid);
2510 rsp.dcid = cpu_to_le16(chan->scid);
2511 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2512 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2513 l2cap_send_cmd(conn, chan->ident,
2514 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2516 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2517 return;
2519 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2520 l2cap_build_conf_req(chan, buf), buf);
2521 chan->num_conf_req++;
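/* Extract the RFC option from a final Configure Response so the ERTM
 * and streaming timeouts and MPS reflect what the remote side actually
 * accepted; sane defaults are used if the option is missing.
 */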
2524 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2526 int type, olen;
2527 unsigned long val;
2528 struct l2cap_conf_rfc rfc;
2530 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2532 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2533 return;
2535 while (len >= L2CAP_CONF_OPT_SIZE) {
2536 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2538 switch (type) {
2539 case L2CAP_CONF_RFC:
2540 if (olen == sizeof(rfc))
2541 memcpy(&rfc, (void *)val, olen);
2542 goto done;
2546 /* Use sane default values in case a misbehaving remote device
2547 * did not send an RFC option. */
2549 rfc.mode = chan->mode;
2550 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2551 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2552 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2554 BT_ERR("Expected RFC option was not found, using defaults");
2556 done:
2557 switch (rfc.mode) {
2558 case L2CAP_MODE_ERTM:
2559 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2560 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2561 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2562 break;
2563 case L2CAP_MODE_STREAMING:
2564 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2568 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2570 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2572 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2573 return 0;
2575 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2576 cmd->ident == conn->info_ident) {
2577 cancel_delayed_work(&conn->info_timer);
2579 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2580 conn->info_ident = 0;
2582 l2cap_conn_start(conn);
2585 return 0;
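/* Handle an incoming Connection Request: look up a listening channel
 * for the PSM, run the security checks, create the new channel and
 * reply with a success, pending or error result.
 */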
2588 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2590 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2591 struct l2cap_conn_rsp rsp;
2592 struct l2cap_chan *chan = NULL, *pchan;
2593 struct sock *parent, *sk = NULL;
2594 int result, status = L2CAP_CS_NO_INFO;
2596 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2597 __le16 psm = req->psm;
2599 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2601 /* Check if we have a socket listening on this PSM */
2602 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2603 if (!pchan) {
2604 result = L2CAP_CR_BAD_PSM;
2605 goto sendresp;
2608 parent = pchan->sk;
2610 lock_sock(parent);
2612 /* Check if the ACL is secure enough (if not SDP) */
2613 if (psm != cpu_to_le16(0x0001) &&
2614 !hci_conn_check_link_mode(conn->hcon)) {
2615 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2616 result = L2CAP_CR_SEC_BLOCK;
2617 goto response;
2620 result = L2CAP_CR_NO_MEM;
2622 /* Check for backlog size */
2623 if (sk_acceptq_is_full(parent)) {
2624 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2625 goto response;
2628 chan = pchan->ops->new_connection(pchan->data);
2629 if (!chan)
2630 goto response;
2632 sk = chan->sk;
2634 /* Check if we already have a channel with that dcid */
2635 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2636 sock_set_flag(sk, SOCK_ZAPPED);
2637 chan->ops->close(chan->data);
2638 goto response;
2641 hci_conn_hold(conn->hcon);
2643 bacpy(&bt_sk(sk)->src, conn->src);
2644 bacpy(&bt_sk(sk)->dst, conn->dst);
2645 chan->psm = psm;
2646 chan->dcid = scid;
2648 bt_accept_enqueue(parent, sk);
2650 l2cap_chan_add(conn, chan);
2652 dcid = chan->scid;
2654 __set_chan_timer(chan, sk->sk_sndtimeo);
2656 chan->ident = cmd->ident;
2658 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2659 if (l2cap_chan_check_security(chan)) {
2660 if (bt_sk(sk)->defer_setup) {
2661 l2cap_state_change(chan, BT_CONNECT2);
2662 result = L2CAP_CR_PEND;
2663 status = L2CAP_CS_AUTHOR_PEND;
2664 parent->sk_data_ready(parent, 0);
2665 } else {
2666 l2cap_state_change(chan, BT_CONFIG);
2667 result = L2CAP_CR_SUCCESS;
2668 status = L2CAP_CS_NO_INFO;
2670 } else {
2671 l2cap_state_change(chan, BT_CONNECT2);
2672 result = L2CAP_CR_PEND;
2673 status = L2CAP_CS_AUTHEN_PEND;
2675 } else {
2676 l2cap_state_change(chan, BT_CONNECT2);
2677 result = L2CAP_CR_PEND;
2678 status = L2CAP_CS_NO_INFO;
2681 response:
2682 release_sock(parent);
2684 sendresp:
2685 rsp.scid = cpu_to_le16(scid);
2686 rsp.dcid = cpu_to_le16(dcid);
2687 rsp.result = cpu_to_le16(result);
2688 rsp.status = cpu_to_le16(status);
2689 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2691 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2692 struct l2cap_info_req info;
2693 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2695 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2696 conn->info_ident = l2cap_get_ident(conn);
2698 schedule_delayed_work(&conn->info_timer,
2699 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2701 l2cap_send_cmd(conn, conn->info_ident,
2702 L2CAP_INFO_REQ, sizeof(info), &info);
2705 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2706 result == L2CAP_CR_SUCCESS) {
2707 u8 buf[128];
2708 set_bit(CONF_REQ_SENT, &chan->conf_state);
2709 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2710 l2cap_build_conf_req(chan, buf), buf);
2711 chan->num_conf_req++;
2714 return 0;
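/* Handle a Connection Response.  On success move to BT_CONFIG and send
 * our first Configure Request; on a pending result just mark the
 * channel; any other result tears the channel down.
 */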
2717 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2719 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2720 u16 scid, dcid, result, status;
2721 struct l2cap_chan *chan;
2722 struct sock *sk;
2723 u8 req[128];
2725 scid = __le16_to_cpu(rsp->scid);
2726 dcid = __le16_to_cpu(rsp->dcid);
2727 result = __le16_to_cpu(rsp->result);
2728 status = __le16_to_cpu(rsp->status);
2730 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2732 if (scid) {
2733 chan = l2cap_get_chan_by_scid(conn, scid);
2734 if (!chan)
2735 return -EFAULT;
2736 } else {
2737 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2738 if (!chan)
2739 return -EFAULT;
2742 sk = chan->sk;
2744 switch (result) {
2745 case L2CAP_CR_SUCCESS:
2746 l2cap_state_change(chan, BT_CONFIG);
2747 chan->ident = 0;
2748 chan->dcid = dcid;
2749 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2751 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2752 break;
2754 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2755 l2cap_build_conf_req(chan, req), req);
2756 chan->num_conf_req++;
2757 break;
2759 case L2CAP_CR_PEND:
2760 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2761 break;
2763 default:
2764 l2cap_chan_del(chan, ECONNREFUSED);
2765 break;
2768 release_sock(sk);
2769 return 0;
2772 static inline void set_default_fcs(struct l2cap_chan *chan)
2774 /* FCS is enabled only in ERTM or streaming mode, if one or both
2775 * sides request it. */
2777 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2778 chan->fcs = L2CAP_FCS_NONE;
2779 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2780 chan->fcs = L2CAP_FCS_CRC16;
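/* Handle a Configuration Request.  Options may be split across several
 * requests (continuation flag set), so they are accumulated in
 * chan->conf_req and only parsed once the final fragment arrives.
 */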
2783 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2785 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2786 u16 dcid, flags;
2787 u8 rsp[64];
2788 struct l2cap_chan *chan;
2789 struct sock *sk;
2790 int len;
2792 dcid = __le16_to_cpu(req->dcid);
2793 flags = __le16_to_cpu(req->flags);
2795 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2797 chan = l2cap_get_chan_by_scid(conn, dcid);
2798 if (!chan)
2799 return -ENOENT;
2801 sk = chan->sk;
2803 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2804 struct l2cap_cmd_rej_cid rej;
2806 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2807 rej.scid = cpu_to_le16(chan->scid);
2808 rej.dcid = cpu_to_le16(chan->dcid);
2810 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2811 sizeof(rej), &rej);
2812 goto unlock;
2815 /* Reject if config buffer is too small. */
2816 len = cmd_len - sizeof(*req);
2817 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2818 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2819 l2cap_build_conf_rsp(chan, rsp,
2820 L2CAP_CONF_REJECT, flags), rsp);
2821 goto unlock;
2824 /* Store config. */
2825 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2826 chan->conf_len += len;
2828 if (flags & 0x0001) {
2829 /* Incomplete config. Send empty response. */
2830 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2831 l2cap_build_conf_rsp(chan, rsp,
2832 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2833 goto unlock;
2836 /* Complete config. */
2837 len = l2cap_parse_conf_req(chan, rsp);
2838 if (len < 0) {
2839 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2840 goto unlock;
2843 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2844 chan->num_conf_rsp++;
2846 /* Reset config buffer. */
2847 chan->conf_len = 0;
2849 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2850 goto unlock;
2852 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2853 set_default_fcs(chan);
2855 l2cap_state_change(chan, BT_CONNECTED);
2857 chan->next_tx_seq = 0;
2858 chan->expected_tx_seq = 0;
2859 skb_queue_head_init(&chan->tx_q);
2860 if (chan->mode == L2CAP_MODE_ERTM)
2861 l2cap_ertm_init(chan);
2863 l2cap_chan_ready(sk);
2864 goto unlock;
2867 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2868 u8 buf[64];
2869 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2870 l2cap_build_conf_req(chan, buf), buf);
2871 chan->num_conf_req++;
2874 /* Got Conf Rsp PENDING from the remote side and assume we sent
2875 Conf Rsp PENDING in the code above */
2876 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2877 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2879 /* check compatibility */
2881 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2882 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2884 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2885 l2cap_build_conf_rsp(chan, rsp,
2886 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2889 unlock:
2890 release_sock(sk);
2891 return 0;
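/* Handle a Configuration Response to one of our requests, covering the
 * success, pending and unacceptable-parameters cases, and move the
 * channel to BT_CONNECTED once both directions are configured.
 */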
2894 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2896 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2897 u16 scid, flags, result;
2898 struct l2cap_chan *chan;
2899 struct sock *sk;
2900 int len = cmd->len - sizeof(*rsp);
2902 scid = __le16_to_cpu(rsp->scid);
2903 flags = __le16_to_cpu(rsp->flags);
2904 result = __le16_to_cpu(rsp->result);
2906 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2907 scid, flags, result);
2909 chan = l2cap_get_chan_by_scid(conn, scid);
2910 if (!chan)
2911 return 0;
2913 sk = chan->sk;
2915 switch (result) {
2916 case L2CAP_CONF_SUCCESS:
2917 l2cap_conf_rfc_get(chan, rsp->data, len);
2918 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2919 break;
2921 case L2CAP_CONF_PENDING:
2922 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2924 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2925 char buf[64];
2927 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2928 buf, &result);
2929 if (len < 0) {
2930 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2931 goto done;
2934 /* check compatibility */
2936 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2937 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2939 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2940 l2cap_build_conf_rsp(chan, buf,
2941 L2CAP_CONF_SUCCESS, 0x0000), buf);
2943 goto done;
2945 case L2CAP_CONF_UNACCEPT:
2946 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2947 char req[64];
2949 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2950 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2951 goto done;
2954 /* throw out any old stored conf requests */
2955 result = L2CAP_CONF_SUCCESS;
2956 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2957 req, &result);
2958 if (len < 0) {
2959 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2960 goto done;
2963 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2964 L2CAP_CONF_REQ, len, req);
2965 chan->num_conf_req++;
2966 if (result != L2CAP_CONF_SUCCESS)
2967 goto done;
2968 break;
2971 default:
2972 sk->sk_err = ECONNRESET;
2973 __set_chan_timer(chan,
2974 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2975 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2976 goto done;
2979 if (flags & 0x01)
2980 goto done;
2982 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2984 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2985 set_default_fcs(chan);
2987 l2cap_state_change(chan, BT_CONNECTED);
2988 chan->next_tx_seq = 0;
2989 chan->expected_tx_seq = 0;
2990 skb_queue_head_init(&chan->tx_q);
2991 if (chan->mode == L2CAP_MODE_ERTM)
2992 l2cap_ertm_init(chan);
2994 l2cap_chan_ready(sk);
2997 done:
2998 release_sock(sk);
2999 return 0;
3002 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3004 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3005 struct l2cap_disconn_rsp rsp;
3006 u16 dcid, scid;
3007 struct l2cap_chan *chan;
3008 struct sock *sk;
3010 scid = __le16_to_cpu(req->scid);
3011 dcid = __le16_to_cpu(req->dcid);
3013 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3015 chan = l2cap_get_chan_by_scid(conn, dcid);
3016 if (!chan)
3017 return 0;
3019 sk = chan->sk;
3021 rsp.dcid = cpu_to_le16(chan->scid);
3022 rsp.scid = cpu_to_le16(chan->dcid);
3023 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3025 sk->sk_shutdown = SHUTDOWN_MASK;
3027 l2cap_chan_del(chan, ECONNRESET);
3028 release_sock(sk);
3030 chan->ops->close(chan->data);
3031 return 0;
3034 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3036 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3037 u16 dcid, scid;
3038 struct l2cap_chan *chan;
3039 struct sock *sk;
3041 scid = __le16_to_cpu(rsp->scid);
3042 dcid = __le16_to_cpu(rsp->dcid);
3044 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3046 chan = l2cap_get_chan_by_scid(conn, scid);
3047 if (!chan)
3048 return 0;
3050 sk = chan->sk;
3052 l2cap_chan_del(chan, 0);
3053 release_sock(sk);
3055 chan->ops->close(chan->data);
3056 return 0;
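/* Answer an Information Request: report our feature mask or fixed
 * channel map, or "not supported" for any other type.
 */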
3059 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3061 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3062 u16 type;
3064 type = __le16_to_cpu(req->type);
3066 BT_DBG("type 0x%4.4x", type);
3068 if (type == L2CAP_IT_FEAT_MASK) {
3069 u8 buf[8];
3070 u32 feat_mask = l2cap_feat_mask;
3071 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3072 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3073 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3074 if (!disable_ertm)
3075 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3076 | L2CAP_FEAT_FCS;
3077 if (enable_hs)
3078 feat_mask |= L2CAP_FEAT_EXT_FLOW
3079 | L2CAP_FEAT_EXT_WINDOW;
3081 put_unaligned_le32(feat_mask, rsp->data);
3082 l2cap_send_cmd(conn, cmd->ident,
3083 L2CAP_INFO_RSP, sizeof(buf), buf);
3084 } else if (type == L2CAP_IT_FIXED_CHAN) {
3085 u8 buf[12];
3086 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3088 if (enable_hs)
3089 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3090 else
3091 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3093 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3094 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3095 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3096 l2cap_send_cmd(conn, cmd->ident,
3097 L2CAP_INFO_RSP, sizeof(buf), buf);
3098 } else {
3099 struct l2cap_info_rsp rsp;
3100 rsp.type = cpu_to_le16(type);
3101 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3102 l2cap_send_cmd(conn, cmd->ident,
3103 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3106 return 0;
3109 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3111 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3112 u16 type, result;
3114 type = __le16_to_cpu(rsp->type);
3115 result = __le16_to_cpu(rsp->result);
3117 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3119 /* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
3120 if (cmd->ident != conn->info_ident ||
3121 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3122 return 0;
3124 cancel_delayed_work(&conn->info_timer);
3126 if (result != L2CAP_IR_SUCCESS) {
3127 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3128 conn->info_ident = 0;
3130 l2cap_conn_start(conn);
3132 return 0;
3135 if (type == L2CAP_IT_FEAT_MASK) {
3136 conn->feat_mask = get_unaligned_le32(rsp->data);
3138 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3139 struct l2cap_info_req req;
3140 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3142 conn->info_ident = l2cap_get_ident(conn);
3144 l2cap_send_cmd(conn, conn->info_ident,
3145 L2CAP_INFO_REQ, sizeof(req), &req);
3146 } else {
3147 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3148 conn->info_ident = 0;
3150 l2cap_conn_start(conn);
3152 } else if (type == L2CAP_IT_FIXED_CHAN) {
3153 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3154 conn->info_ident = 0;
3156 l2cap_conn_start(conn);
3159 return 0;
3162 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3163 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3164 void *data)
3166 struct l2cap_create_chan_req *req = data;
3167 struct l2cap_create_chan_rsp rsp;
3168 u16 psm, scid;
3170 if (cmd_len != sizeof(*req))
3171 return -EPROTO;
3173 if (!enable_hs)
3174 return -EINVAL;
3176 psm = le16_to_cpu(req->psm);
3177 scid = le16_to_cpu(req->scid);
3179 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3181 /* Placeholder: Always reject */
3182 rsp.dcid = 0;
3183 rsp.scid = cpu_to_le16(scid);
3184 rsp.result = L2CAP_CR_NO_MEM;
3185 rsp.status = L2CAP_CS_NO_INFO;
3187 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3188 sizeof(rsp), &rsp);
3190 return 0;
3193 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3194 struct l2cap_cmd_hdr *cmd, void *data)
3196 BT_DBG("conn %p", conn);
3198 return l2cap_connect_rsp(conn, cmd, data);
3201 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3202 u16 icid, u16 result)
3204 struct l2cap_move_chan_rsp rsp;
3206 BT_DBG("icid %d, result %d", icid, result);
3208 rsp.icid = cpu_to_le16(icid);
3209 rsp.result = cpu_to_le16(result);
3211 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3214 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3215 struct l2cap_chan *chan, u16 icid, u16 result)
3217 struct l2cap_move_chan_cfm cfm;
3218 u8 ident;
3220 BT_DBG("icid %d, result %d", icid, result);
3222 ident = l2cap_get_ident(conn);
3223 if (chan)
3224 chan->ident = ident;
3226 cfm.icid = cpu_to_le16(icid);
3227 cfm.result = cpu_to_le16(result);
3229 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3232 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3233 u16 icid)
3235 struct l2cap_move_chan_cfm_rsp rsp;
3237 BT_DBG("icid %d", icid);
3239 rsp.icid = cpu_to_le16(icid);
3240 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3243 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3244 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3246 struct l2cap_move_chan_req *req = data;
3247 u16 icid = 0;
3248 u16 result = L2CAP_MR_NOT_ALLOWED;
3250 if (cmd_len != sizeof(*req))
3251 return -EPROTO;
3253 icid = le16_to_cpu(req->icid);
3255 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3257 if (!enable_hs)
3258 return -EINVAL;
3260 /* Placeholder: Always refuse */
3261 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3263 return 0;
3266 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3267 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3269 struct l2cap_move_chan_rsp *rsp = data;
3270 u16 icid, result;
3272 if (cmd_len != sizeof(*rsp))
3273 return -EPROTO;
3275 icid = le16_to_cpu(rsp->icid);
3276 result = le16_to_cpu(rsp->result);
3278 BT_DBG("icid %d, result %d", icid, result);
3280 /* Placeholder: Always unconfirmed */
3281 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3283 return 0;
3286 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3287 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3289 struct l2cap_move_chan_cfm *cfm = data;
3290 u16 icid, result;
3292 if (cmd_len != sizeof(*cfm))
3293 return -EPROTO;
3295 icid = le16_to_cpu(cfm->icid);
3296 result = le16_to_cpu(cfm->result);
3298 BT_DBG("icid %d, result %d", icid, result);
3300 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3302 return 0;
3305 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3306 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3308 struct l2cap_move_chan_cfm_rsp *rsp = data;
3309 u16 icid;
3311 if (cmd_len != sizeof(*rsp))
3312 return -EPROTO;
3314 icid = le16_to_cpu(rsp->icid);
3316 BT_DBG("icid %d", icid);
3318 return 0;
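/* Validate the requested LE connection parameters (interval, latency,
 * supervision timeout) against the allowed ranges before asking the
 * controller to update them.
 */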
3321 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3322 u16 to_multiplier)
3324 u16 max_latency;
3326 if (min > max || min < 6 || max > 3200)
3327 return -EINVAL;
3329 if (to_multiplier < 10 || to_multiplier > 3200)
3330 return -EINVAL;
3332 if (max >= to_multiplier * 8)
3333 return -EINVAL;
3335 max_latency = (to_multiplier * 8 / max) - 1;
3336 if (latency > 499 || latency > max_latency)
3337 return -EINVAL;
3339 return 0;
3342 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3343 struct l2cap_cmd_hdr *cmd, u8 *data)
3345 struct hci_conn *hcon = conn->hcon;
3346 struct l2cap_conn_param_update_req *req;
3347 struct l2cap_conn_param_update_rsp rsp;
3348 u16 min, max, latency, to_multiplier, cmd_len;
3349 int err;
3351 if (!(hcon->link_mode & HCI_LM_MASTER))
3352 return -EINVAL;
3354 cmd_len = __le16_to_cpu(cmd->len);
3355 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3356 return -EPROTO;
3358 req = (struct l2cap_conn_param_update_req *) data;
3359 min = __le16_to_cpu(req->min);
3360 max = __le16_to_cpu(req->max);
3361 latency = __le16_to_cpu(req->latency);
3362 to_multiplier = __le16_to_cpu(req->to_multiplier);
3364 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3365 min, max, latency, to_multiplier);
3367 memset(&rsp, 0, sizeof(rsp));
3369 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3370 if (err)
3371 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3372 else
3373 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3375 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3376 sizeof(rsp), &rsp);
3378 if (!err)
3379 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3381 return 0;
3384 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3385 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3387 int err = 0;
3389 switch (cmd->code) {
3390 case L2CAP_COMMAND_REJ:
3391 l2cap_command_rej(conn, cmd, data);
3392 break;
3394 case L2CAP_CONN_REQ:
3395 err = l2cap_connect_req(conn, cmd, data);
3396 break;
3398 case L2CAP_CONN_RSP:
3399 err = l2cap_connect_rsp(conn, cmd, data);
3400 break;
3402 case L2CAP_CONF_REQ:
3403 err = l2cap_config_req(conn, cmd, cmd_len, data);
3404 break;
3406 case L2CAP_CONF_RSP:
3407 err = l2cap_config_rsp(conn, cmd, data);
3408 break;
3410 case L2CAP_DISCONN_REQ:
3411 err = l2cap_disconnect_req(conn, cmd, data);
3412 break;
3414 case L2CAP_DISCONN_RSP:
3415 err = l2cap_disconnect_rsp(conn, cmd, data);
3416 break;
3418 case L2CAP_ECHO_REQ:
3419 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3420 break;
3422 case L2CAP_ECHO_RSP:
3423 break;
3425 case L2CAP_INFO_REQ:
3426 err = l2cap_information_req(conn, cmd, data);
3427 break;
3429 case L2CAP_INFO_RSP:
3430 err = l2cap_information_rsp(conn, cmd, data);
3431 break;
3433 case L2CAP_CREATE_CHAN_REQ:
3434 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3435 break;
3437 case L2CAP_CREATE_CHAN_RSP:
3438 err = l2cap_create_channel_rsp(conn, cmd, data);
3439 break;
3441 case L2CAP_MOVE_CHAN_REQ:
3442 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3443 break;
3445 case L2CAP_MOVE_CHAN_RSP:
3446 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3447 break;
3449 case L2CAP_MOVE_CHAN_CFM:
3450 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3451 break;
3453 case L2CAP_MOVE_CHAN_CFM_RSP:
3454 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3455 break;
3457 default:
3458 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3459 err = -EINVAL;
3460 break;
3463 return err;
3466 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3467 struct l2cap_cmd_hdr *cmd, u8 *data)
3469 switch (cmd->code) {
3470 case L2CAP_COMMAND_REJ:
3471 return 0;
3473 case L2CAP_CONN_PARAM_UPDATE_REQ:
3474 return l2cap_conn_param_update_req(conn, cmd, data);
3476 case L2CAP_CONN_PARAM_UPDATE_RSP:
3477 return 0;
3479 default:
3480 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3481 return -EINVAL;
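/* Walk all signalling commands contained in one C-frame and dispatch
 * them to the BR/EDR or LE handlers; unknown or failing commands are
 * answered with a Command Reject.
 */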
3485 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3486 struct sk_buff *skb)
3488 u8 *data = skb->data;
3489 int len = skb->len;
3490 struct l2cap_cmd_hdr cmd;
3491 int err;
3493 l2cap_raw_recv(conn, skb);
3495 while (len >= L2CAP_CMD_HDR_SIZE) {
3496 u16 cmd_len;
3497 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3498 data += L2CAP_CMD_HDR_SIZE;
3499 len -= L2CAP_CMD_HDR_SIZE;
3501 cmd_len = le16_to_cpu(cmd.len);
3503 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3505 if (cmd_len > len || !cmd.ident) {
3506 BT_DBG("corrupted command");
3507 break;
3510 if (conn->hcon->type == LE_LINK)
3511 err = l2cap_le_sig_cmd(conn, &cmd, data);
3512 else
3513 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3515 if (err) {
3516 struct l2cap_cmd_rej_unk rej;
3518 BT_ERR("Wrong link type (%d)", err);
3520 /* FIXME: Map err to a valid reason */
3521 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3522 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3525 data += cmd_len;
3526 len -= cmd_len;
3529 kfree_skb(skb);
3532 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3534 u16 our_fcs, rcv_fcs;
3535 int hdr_size;
3537 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3538 hdr_size = L2CAP_EXT_HDR_SIZE;
3539 else
3540 hdr_size = L2CAP_ENH_HDR_SIZE;
3542 if (chan->fcs == L2CAP_FCS_CRC16) {
3543 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3544 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3545 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3547 if (our_fcs != rcv_fcs)
3548 return -EBADMSG;
3550 return 0;
3553 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3555 u32 control = 0;
3557 chan->frames_sent = 0;
3559 control |= __set_reqseq(chan, chan->buffer_seq);
3561 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3562 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3563 l2cap_send_sframe(chan, control);
3564 set_bit(CONN_RNR_SENT, &chan->conn_state);
3567 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3568 l2cap_retransmit_frames(chan);
3570 l2cap_ertm_send(chan);
3572 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3573 chan->frames_sent == 0) {
3574 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3575 l2cap_send_sframe(chan, control);
3579 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3581 struct sk_buff *next_skb;
3582 int tx_seq_offset, next_tx_seq_offset;
3584 bt_cb(skb)->tx_seq = tx_seq;
3585 bt_cb(skb)->sar = sar;
3587 next_skb = skb_peek(&chan->srej_q);
3589 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3591 while (next_skb) {
3592 if (bt_cb(next_skb)->tx_seq == tx_seq)
3593 return -EINVAL;
3595 next_tx_seq_offset = __seq_offset(chan,
3596 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3598 if (next_tx_seq_offset > tx_seq_offset) {
3599 __skb_queue_before(&chan->srej_q, next_skb, skb);
3600 return 0;
3603 if (skb_queue_is_last(&chan->srej_q, next_skb))
3604 next_skb = NULL;
3605 else
3606 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3609 __skb_queue_tail(&chan->srej_q, skb);
3611 return 0;
3614 static void append_skb_frag(struct sk_buff *skb,
3615 struct sk_buff *new_frag, struct sk_buff **last_frag)
3617 /* skb->len reflects data in skb as well as all fragments
3618 * skb->data_len reflects only data in fragments */
3620 if (!skb_has_frag_list(skb))
3621 skb_shinfo(skb)->frag_list = new_frag;
3623 new_frag->next = NULL;
3625 (*last_frag)->next = new_frag;
3626 *last_frag = new_frag;
3628 skb->len += new_frag->len;
3629 skb->data_len += new_frag->len;
3630 skb->truesize += new_frag->truesize;
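/* Reassemble a segmented SDU from its I-frames according to the SAR
 * bits in the control field and hand the complete SDU to the upper
 * layer.
 */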
3633 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3635 int err = -EINVAL;
3637 switch (__get_ctrl_sar(chan, control)) {
3638 case L2CAP_SAR_UNSEGMENTED:
3639 if (chan->sdu)
3640 break;
3642 err = chan->ops->recv(chan->data, skb);
3643 break;
3645 case L2CAP_SAR_START:
3646 if (chan->sdu)
3647 break;
3649 chan->sdu_len = get_unaligned_le16(skb->data);
3650 skb_pull(skb, L2CAP_SDULEN_SIZE);
3652 if (chan->sdu_len > chan->imtu) {
3653 err = -EMSGSIZE;
3654 break;
3657 if (skb->len >= chan->sdu_len)
3658 break;
3660 chan->sdu = skb;
3661 chan->sdu_last_frag = skb;
3663 skb = NULL;
3664 err = 0;
3665 break;
3667 case L2CAP_SAR_CONTINUE:
3668 if (!chan->sdu)
3669 break;
3671 append_skb_frag(chan->sdu, skb,
3672 &chan->sdu_last_frag);
3673 skb = NULL;
3675 if (chan->sdu->len >= chan->sdu_len)
3676 break;
3678 err = 0;
3679 break;
3681 case L2CAP_SAR_END:
3682 if (!chan->sdu)
3683 break;
3685 append_skb_frag(chan->sdu, skb,
3686 &chan->sdu_last_frag);
3687 skb = NULL;
3689 if (chan->sdu->len != chan->sdu_len)
3690 break;
3692 err = chan->ops->recv(chan->data, chan->sdu);
3694 if (!err) {
3695 /* Reassembly complete */
3696 chan->sdu = NULL;
3697 chan->sdu_last_frag = NULL;
3698 chan->sdu_len = 0;
3700 break;
3703 if (err) {
3704 kfree_skb(skb);
3705 kfree_skb(chan->sdu);
3706 chan->sdu = NULL;
3707 chan->sdu_last_frag = NULL;
3708 chan->sdu_len = 0;
3711 return err;
3714 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3716 u32 control;
3718 BT_DBG("chan %p, Enter local busy", chan);
3720 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3722 control = __set_reqseq(chan, chan->buffer_seq);
3723 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3724 l2cap_send_sframe(chan, control);
3726 set_bit(CONN_RNR_SENT, &chan->conn_state);
3728 __clear_ack_timer(chan);
3731 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3733 u32 control;
3735 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3736 goto done;
3738 control = __set_reqseq(chan, chan->buffer_seq);
3739 control |= __set_ctrl_poll(chan);
3740 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3741 l2cap_send_sframe(chan, control);
3742 chan->retry_count = 1;
3744 __clear_retrans_timer(chan);
3745 __set_monitor_timer(chan);
3747 set_bit(CONN_WAIT_F, &chan->conn_state);
3749 done:
3750 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3751 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3753 BT_DBG("chan %p, Exit local busy", chan);
3756 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3758 if (chan->mode == L2CAP_MODE_ERTM) {
3759 if (busy)
3760 l2cap_ertm_enter_local_busy(chan);
3761 else
3762 l2cap_ertm_exit_local_busy(chan);
3766 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3768 struct sk_buff *skb;
3769 u32 control;
3771 while ((skb = skb_peek(&chan->srej_q)) &&
3772 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3773 int err;
3775 if (bt_cb(skb)->tx_seq != tx_seq)
3776 break;
3778 skb = skb_dequeue(&chan->srej_q);
3779 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3780 err = l2cap_reassemble_sdu(chan, skb, control);
3782 if (err < 0) {
3783 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3784 break;
3787 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3788 tx_seq = __next_seq(chan, tx_seq);
3792 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3794 struct srej_list *l, *tmp;
3795 u32 control;
3797 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3798 if (l->tx_seq == tx_seq) {
3799 list_del(&l->list);
3800 kfree(l);
3801 return;
3803 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3804 control |= __set_reqseq(chan, l->tx_seq);
3805 l2cap_send_sframe(chan, control);
3806 list_del(&l->list);
3807 list_add_tail(&l->list, &chan->srej_l);
3811 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3813 struct srej_list *new;
3814 u32 control;
3816 while (tx_seq != chan->expected_tx_seq) {
3817 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3818 control |= __set_reqseq(chan, chan->expected_tx_seq);
3819 l2cap_send_sframe(chan, control);
3821 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3822 if (!new)
3823 return -ENOMEM;
3825 new->tx_seq = chan->expected_tx_seq;
3827 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3829 list_add_tail(&new->list, &chan->srej_l);
3832 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3834 return 0;
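/* Process one received ERTM I-frame: acknowledge outstanding frames,
 * detect missing or duplicated tx_seq values (driving the SREJ
 * recovery path) and pass in-sequence data on to reassembly.
 */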
3837 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3839 u16 tx_seq = __get_txseq(chan, rx_control);
3840 u16 req_seq = __get_reqseq(chan, rx_control);
3841 u8 sar = __get_ctrl_sar(chan, rx_control);
3842 int tx_seq_offset, expected_tx_seq_offset;
3843 int num_to_ack = (chan->tx_win/6) + 1;
3844 int err = 0;
3846 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3847 tx_seq, rx_control);
3849 if (__is_ctrl_final(chan, rx_control) &&
3850 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3851 __clear_monitor_timer(chan);
3852 if (chan->unacked_frames > 0)
3853 __set_retrans_timer(chan);
3854 clear_bit(CONN_WAIT_F, &chan->conn_state);
3857 chan->expected_ack_seq = req_seq;
3858 l2cap_drop_acked_frames(chan);
3860 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3862 /* invalid tx_seq */
3863 if (tx_seq_offset >= chan->tx_win) {
3864 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3865 goto drop;
3868 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3869 goto drop;
3871 if (tx_seq == chan->expected_tx_seq)
3872 goto expected;
3874 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3875 struct srej_list *first;
3877 first = list_first_entry(&chan->srej_l,
3878 struct srej_list, list);
3879 if (tx_seq == first->tx_seq) {
3880 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3881 l2cap_check_srej_gap(chan, tx_seq);
3883 list_del(&first->list);
3884 kfree(first);
3886 if (list_empty(&chan->srej_l)) {
3887 chan->buffer_seq = chan->buffer_seq_srej;
3888 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3889 l2cap_send_ack(chan);
3890 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3892 } else {
3893 struct srej_list *l;
3895 /* duplicated tx_seq */
3896 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3897 goto drop;
3899 list_for_each_entry(l, &chan->srej_l, list) {
3900 if (l->tx_seq == tx_seq) {
3901 l2cap_resend_srejframe(chan, tx_seq);
3902 return 0;
3906 err = l2cap_send_srejframe(chan, tx_seq);
3907 if (err < 0) {
3908 l2cap_send_disconn_req(chan->conn, chan, -err);
3909 return err;
3912 } else {
3913 expected_tx_seq_offset = __seq_offset(chan,
3914 chan->expected_tx_seq, chan->buffer_seq);
3916 /* duplicated tx_seq */
3917 if (tx_seq_offset < expected_tx_seq_offset)
3918 goto drop;
3920 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3922 BT_DBG("chan %p, Enter SREJ", chan);
3924 INIT_LIST_HEAD(&chan->srej_l);
3925 chan->buffer_seq_srej = chan->buffer_seq;
3927 __skb_queue_head_init(&chan->srej_q);
3928 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3930 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3932 err = l2cap_send_srejframe(chan, tx_seq);
3933 if (err < 0) {
3934 l2cap_send_disconn_req(chan->conn, chan, -err);
3935 return err;
3938 __clear_ack_timer(chan);
3940 return 0;
3942 expected:
3943 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3945 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3946 bt_cb(skb)->tx_seq = tx_seq;
3947 bt_cb(skb)->sar = sar;
3948 __skb_queue_tail(&chan->srej_q, skb);
3949 return 0;
3952 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3953 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3955 if (err < 0) {
3956 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3957 return err;
3960 if (__is_ctrl_final(chan, rx_control)) {
3961 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3962 l2cap_retransmit_frames(chan);
3966 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3967 if (chan->num_acked == num_to_ack - 1)
3968 l2cap_send_ack(chan);
3969 else
3970 __set_ack_timer(chan);
3972 return 0;
3974 drop:
3975 kfree_skb(skb);
3976 return 0;
3979 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3981 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3982 __get_reqseq(chan, rx_control), rx_control);
3984 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3985 l2cap_drop_acked_frames(chan);
3987 if (__is_ctrl_poll(chan, rx_control)) {
3988 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3989 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3990 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3991 (chan->unacked_frames > 0))
3992 __set_retrans_timer(chan);
3994 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3995 l2cap_send_srejtail(chan);
3996 } else {
3997 l2cap_send_i_or_rr_or_rnr(chan);
4000 } else if (__is_ctrl_final(chan, rx_control)) {
4001 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4003 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4004 l2cap_retransmit_frames(chan);
4006 } else {
4007 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4008 (chan->unacked_frames > 0))
4009 __set_retrans_timer(chan);
4011 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4012 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4013 l2cap_send_ack(chan);
4014 else
4015 l2cap_ertm_send(chan);
4019 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4021 u16 tx_seq = __get_reqseq(chan, rx_control);
4023 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4025 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4027 chan->expected_ack_seq = tx_seq;
4028 l2cap_drop_acked_frames(chan);
4030 if (__is_ctrl_final(chan, rx_control)) {
4031 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4032 l2cap_retransmit_frames(chan);
4033 } else {
4034 l2cap_retransmit_frames(chan);
4036 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4037 set_bit(CONN_REJ_ACT, &chan->conn_state);
4040 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4042 u16 tx_seq = __get_reqseq(chan, rx_control);
4044 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4046 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4048 if (__is_ctrl_poll(chan, rx_control)) {
4049 chan->expected_ack_seq = tx_seq;
4050 l2cap_drop_acked_frames(chan);
4052 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4053 l2cap_retransmit_one_frame(chan, tx_seq);
4055 l2cap_ertm_send(chan);
4057 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4058 chan->srej_save_reqseq = tx_seq;
4059 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4061 } else if (__is_ctrl_final(chan, rx_control)) {
4062 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4063 chan->srej_save_reqseq == tx_seq)
4064 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4065 else
4066 l2cap_retransmit_one_frame(chan, tx_seq);
4067 } else {
4068 l2cap_retransmit_one_frame(chan, tx_seq);
4069 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4070 chan->srej_save_reqseq = tx_seq;
4071 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4076 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4078 u16 tx_seq = __get_reqseq(chan, rx_control);
4080 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4082 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4083 chan->expected_ack_seq = tx_seq;
4084 l2cap_drop_acked_frames(chan);
4086 if (__is_ctrl_poll(chan, rx_control))
4087 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4089 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4090 __clear_retrans_timer(chan);
4091 if (__is_ctrl_poll(chan, rx_control))
4092 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4093 return;
4096 if (__is_ctrl_poll(chan, rx_control)) {
4097 l2cap_send_srejtail(chan);
4098 } else {
4099 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4100 l2cap_send_sframe(chan, rx_control);
4104 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4106 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4108 if (__is_ctrl_final(chan, rx_control) &&
4109 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4110 __clear_monitor_timer(chan);
4111 if (chan->unacked_frames > 0)
4112 __set_retrans_timer(chan);
4113 clear_bit(CONN_WAIT_F, &chan->conn_state);
4116 switch (__get_ctrl_super(chan, rx_control)) {
4117 case L2CAP_SUPER_RR:
4118 l2cap_data_channel_rrframe(chan, rx_control);
4119 break;
4121 case L2CAP_SUPER_REJ:
4122 l2cap_data_channel_rejframe(chan, rx_control);
4123 break;
4125 case L2CAP_SUPER_SREJ:
4126 l2cap_data_channel_srejframe(chan, rx_control);
4127 break;
4129 case L2CAP_SUPER_RNR:
4130 l2cap_data_channel_rnrframe(chan, rx_control);
4131 break;
4134 kfree_skb(skb);
4135 return 0;
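/* Entry point for ERTM data: validate FCS and lengths, then route the
 * frame to the I-frame or S-frame handler.
 */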
4138 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4140 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4141 u32 control;
4142 u16 req_seq;
4143 int len, next_tx_seq_offset, req_seq_offset;
4145 control = __get_control(chan, skb->data);
4146 skb_pull(skb, __ctrl_size(chan));
4147 len = skb->len;
4150 /* We can just drop the corrupted I-frame here.
4151 * The receiver will miss it and start the proper recovery
4152 * procedures and ask for retransmission. */
4154 if (l2cap_check_fcs(chan, skb))
4155 goto drop;
4157 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4158 len -= L2CAP_SDULEN_SIZE;
4160 if (chan->fcs == L2CAP_FCS_CRC16)
4161 len -= L2CAP_FCS_SIZE;
4163 if (len > chan->mps) {
4164 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4165 goto drop;
4168 req_seq = __get_reqseq(chan, control);
4170 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4172 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4173 chan->expected_ack_seq);
4175 /* check for invalid req-seq */
4176 if (req_seq_offset > next_tx_seq_offset) {
4177 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4178 goto drop;
4181 if (!__is_sframe(chan, control)) {
4182 if (len < 0) {
4183 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4184 goto drop;
4187 l2cap_data_channel_iframe(chan, control, skb);
4188 } else {
4189 if (len != 0) {
4190 BT_ERR("%d", len);
4191 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4192 goto drop;
4195 l2cap_data_channel_sframe(chan, control, skb);
4198 return 0;
4200 drop:
4201 kfree_skb(skb);
4202 return 0;
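/* Deliver a frame received on a connection-oriented CID to the owning
 * channel according to its mode (basic, ERTM or streaming).
 */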
4205 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4207 struct l2cap_chan *chan;
4208 struct sock *sk = NULL;
4209 u32 control;
4210 u16 tx_seq;
4211 int len;
4213 chan = l2cap_get_chan_by_scid(conn, cid);
4214 if (!chan) {
4215 BT_DBG("unknown cid 0x%4.4x", cid);
4216 goto drop;
4219 sk = chan->sk;
4221 BT_DBG("chan %p, len %d", chan, skb->len);
4223 if (chan->state != BT_CONNECTED)
4224 goto drop;
4226 switch (chan->mode) {
4227 case L2CAP_MODE_BASIC:
4228 /* If the socket receive buffer overflows we drop data here,
4229 * which is *bad* because L2CAP has to be reliable.
4230 * But we don't have any other choice: L2CAP doesn't
4231 * provide a flow control mechanism. */
4233 if (chan->imtu < skb->len)
4234 goto drop;
4236 if (!chan->ops->recv(chan->data, skb))
4237 goto done;
4238 break;
4240 case L2CAP_MODE_ERTM:
4241 l2cap_ertm_data_rcv(sk, skb);
4243 goto done;
4245 case L2CAP_MODE_STREAMING:
4246 control = __get_control(chan, skb->data);
4247 skb_pull(skb, __ctrl_size(chan));
4248 len = skb->len;
4250 if (l2cap_check_fcs(chan, skb))
4251 goto drop;
4253 if (__is_sar_start(chan, control))
4254 len -= L2CAP_SDULEN_SIZE;
4256 if (chan->fcs == L2CAP_FCS_CRC16)
4257 len -= L2CAP_FCS_SIZE;
4259 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4260 goto drop;
4262 tx_seq = __get_txseq(chan, control);
4264 if (chan->expected_tx_seq != tx_seq) {
4265 /* Frame(s) missing - must discard partial SDU */
4266 kfree_skb(chan->sdu);
4267 chan->sdu = NULL;
4268 chan->sdu_last_frag = NULL;
4269 chan->sdu_len = 0;
4271 /* TODO: Notify userland of missing data */
4274 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4276 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4277 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4279 goto done;
4281 default:
4282 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4283 break;
4286 drop:
4287 kfree_skb(skb);
4289 done:
4290 if (sk)
4291 release_sock(sk);
4293 return 0;
4296 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4298 struct sock *sk = NULL;
4299 struct l2cap_chan *chan;
4301 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4302 if (!chan)
4303 goto drop;
4305 sk = chan->sk;
4307 lock_sock(sk);
4309 BT_DBG("sk %p, len %d", sk, skb->len);
4311 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4312 goto drop;
4314 if (chan->imtu < skb->len)
4315 goto drop;
4317 if (!chan->ops->recv(chan->data, skb))
4318 goto done;
4320 drop:
4321 kfree_skb(skb);
4323 done:
4324 if (sk)
4325 release_sock(sk);
4326 return 0;
4329 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4331 struct sock *sk = NULL;
4332 struct l2cap_chan *chan;
4334 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4335 if (!chan)
4336 goto drop;
4338 sk = chan->sk;
4340 lock_sock(sk);
4342 BT_DBG("sk %p, len %d", sk, skb->len);
4344 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4345 goto drop;
4347 if (chan->imtu < skb->len)
4348 goto drop;
4350 if (!chan->ops->recv(chan->data, skb))
4351 goto done;
4353 drop:
4354 kfree_skb(skb);
4356 done:
4357 if (sk)
4358 release_sock(sk);
4359 return 0;
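/* Demultiplex a complete L2CAP frame to the signalling, connectionless,
 * ATT, SMP or data channel handlers based on its CID.
 */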
4362 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4364 struct l2cap_hdr *lh = (void *) skb->data;
4365 u16 cid, len;
4366 __le16 psm;
4368 skb_pull(skb, L2CAP_HDR_SIZE);
4369 cid = __le16_to_cpu(lh->cid);
4370 len = __le16_to_cpu(lh->len);
4372 if (len != skb->len) {
4373 kfree_skb(skb);
4374 return;
4377 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4379 switch (cid) {
4380 case L2CAP_CID_LE_SIGNALING:
4381 case L2CAP_CID_SIGNALING:
4382 l2cap_sig_channel(conn, skb);
4383 break;
4385 case L2CAP_CID_CONN_LESS:
4386 psm = get_unaligned_le16(skb->data);
4387 skb_pull(skb, 2);
4388 l2cap_conless_channel(conn, psm, skb);
4389 break;
4391 case L2CAP_CID_LE_DATA:
4392 l2cap_att_channel(conn, cid, skb);
4393 break;
4395 case L2CAP_CID_SMP:
4396 if (smp_sig_channel(conn, skb))
4397 l2cap_conn_del(conn->hcon, EACCES);
4398 break;
4400 default:
4401 l2cap_data_channel(conn, cid, skb);
4402 break;
4406 /* ---- L2CAP interface with lower layer (HCI) ---- */
4408 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4410 int exact = 0, lm1 = 0, lm2 = 0;
4411 struct l2cap_chan *c;
4413 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4415 /* Find listening sockets and check their link_mode */
4416 read_lock(&chan_list_lock);
4417 list_for_each_entry(c, &chan_list, global_l) {
4418 struct sock *sk = c->sk;
4420 if (c->state != BT_LISTEN)
4421 continue;
4423 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4424 lm1 |= HCI_LM_ACCEPT;
4425 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4426 lm1 |= HCI_LM_MASTER;
4427 exact++;
4428 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4429 lm2 |= HCI_LM_ACCEPT;
4430 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4431 lm2 |= HCI_LM_MASTER;
4434 read_unlock(&chan_list_lock);
4436 return exact ? lm1 : lm2;
4439 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4441 struct l2cap_conn *conn;
4443 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4445 if (!status) {
4446 conn = l2cap_conn_add(hcon, status);
4447 if (conn)
4448 l2cap_conn_ready(conn);
4449 } else
4450 l2cap_conn_del(hcon, bt_to_errno(status));
4452 return 0;
4455 int l2cap_disconn_ind(struct hci_conn *hcon)
4457 struct l2cap_conn *conn = hcon->l2cap_data;
4459 BT_DBG("hcon %p", hcon);
4461 if (!conn)
4462 return HCI_ERROR_REMOTE_USER_TERM;
4463 return conn->disc_reason;
4466 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4468 BT_DBG("hcon %p reason %d", hcon, reason);
4470 l2cap_conn_del(hcon, bt_to_errno(reason));
4471 return 0;
4474 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4476 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4477 return;
4479 if (encrypt == 0x00) {
4480 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4481 __clear_chan_timer(chan);
4482 __set_chan_timer(chan,
4483 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4484 } else if (chan->sec_level == BT_SECURITY_HIGH)
4485 l2cap_chan_close(chan, ECONNREFUSED);
4486 } else {
4487 if (chan->sec_level == BT_SECURITY_MEDIUM)
4488 __clear_chan_timer(chan);
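/* The security level (authentication/encryption) of the link changed:
 * resume pending connect and configuration sequences, or tear channels
 * down, depending on the result.
 */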
4492 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4494 struct l2cap_conn *conn = hcon->l2cap_data;
4495 struct l2cap_chan *chan;
4497 if (!conn)
4498 return 0;
4500 BT_DBG("conn %p", conn);
4502 if (hcon->type == LE_LINK) {
4503 smp_distribute_keys(conn, 0);
4504 cancel_delayed_work(&conn->security_timer);
4507 rcu_read_lock();
4509 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4510 struct sock *sk = chan->sk;
4512 bh_lock_sock(sk);
4514 BT_DBG("chan->scid %d", chan->scid);
4516 if (chan->scid == L2CAP_CID_LE_DATA) {
4517 if (!status && encrypt) {
4518 chan->sec_level = hcon->sec_level;
4519 l2cap_chan_ready(sk);
4522 bh_unlock_sock(sk);
4523 continue;
4526 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4527 bh_unlock_sock(sk);
4528 continue;
4531 if (!status && (chan->state == BT_CONNECTED ||
4532 chan->state == BT_CONFIG)) {
4533 l2cap_check_encryption(chan, encrypt);
4534 bh_unlock_sock(sk);
4535 continue;
4538 if (chan->state == BT_CONNECT) {
4539 if (!status) {
4540 struct l2cap_conn_req req;
4541 req.scid = cpu_to_le16(chan->scid);
4542 req.psm = chan->psm;
4544 chan->ident = l2cap_get_ident(conn);
4545 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4547 l2cap_send_cmd(conn, chan->ident,
4548 L2CAP_CONN_REQ, sizeof(req), &req);
4549 } else {
4550 __clear_chan_timer(chan);
4551 __set_chan_timer(chan,
4552 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4554 } else if (chan->state == BT_CONNECT2) {
4555 struct l2cap_conn_rsp rsp;
4556 __u16 res, stat;
4558 if (!status) {
4559 if (bt_sk(sk)->defer_setup) {
4560 struct sock *parent = bt_sk(sk)->parent;
4561 res = L2CAP_CR_PEND;
4562 stat = L2CAP_CS_AUTHOR_PEND;
4563 if (parent)
4564 parent->sk_data_ready(parent, 0);
4565 } else {
4566 l2cap_state_change(chan, BT_CONFIG);
4567 res = L2CAP_CR_SUCCESS;
4568 stat = L2CAP_CS_NO_INFO;
4570 } else {
4571 l2cap_state_change(chan, BT_DISCONN);
4572 __set_chan_timer(chan,
4573 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4574 res = L2CAP_CR_SEC_BLOCK;
4575 stat = L2CAP_CS_NO_INFO;
4578 rsp.scid = cpu_to_le16(chan->dcid);
4579 rsp.dcid = cpu_to_le16(chan->scid);
4580 rsp.result = cpu_to_le16(res);
4581 rsp.status = cpu_to_le16(stat);
4582 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4583 sizeof(rsp), &rsp);
4586 bh_unlock_sock(sk);
4589 rcu_read_unlock();
4591 return 0;
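/* Receive ACL data from HCI and reassemble start/continuation
 * fragments into complete L2CAP frames before handing them to
 * l2cap_recv_frame().
 */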
4594 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4596 struct l2cap_conn *conn = hcon->l2cap_data;
4598 if (!conn)
4599 conn = l2cap_conn_add(hcon, 0);
4601 if (!conn)
4602 goto drop;
4604 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4606 if (!(flags & ACL_CONT)) {
4607 struct l2cap_hdr *hdr;
4608 struct l2cap_chan *chan;
4609 u16 cid;
4610 int len;
4612 if (conn->rx_len) {
4613 BT_ERR("Unexpected start frame (len %d)", skb->len);
4614 kfree_skb(conn->rx_skb);
4615 conn->rx_skb = NULL;
4616 conn->rx_len = 0;
4617 l2cap_conn_unreliable(conn, ECOMM);
4620 /* A start fragment always begins with the Basic L2CAP header */
4621 if (skb->len < L2CAP_HDR_SIZE) {
4622 BT_ERR("Frame is too short (len %d)", skb->len);
4623 l2cap_conn_unreliable(conn, ECOMM);
4624 goto drop;
4627 hdr = (struct l2cap_hdr *) skb->data;
4628 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4629 cid = __le16_to_cpu(hdr->cid);
4631 if (len == skb->len) {
4632 /* Complete frame received */
4633 l2cap_recv_frame(conn, skb);
4634 return 0;
4637 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4639 if (skb->len > len) {
4640 BT_ERR("Frame is too long (len %d, expected len %d)",
4641 skb->len, len);
4642 l2cap_conn_unreliable(conn, ECOMM);
4643 goto drop;
4646 chan = l2cap_get_chan_by_scid(conn, cid);
4648 if (chan && chan->sk) {
4649 struct sock *sk = chan->sk;
4651 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4652 BT_ERR("Frame exceeding recv MTU (len %d, "
4653 "MTU %d)", len,
4654 chan->imtu);
4655 release_sock(sk);
4656 l2cap_conn_unreliable(conn, ECOMM);
4657 goto drop;
4659 release_sock(sk);
4662 /* Allocate skb for the complete frame (with header) */
4663 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4664 if (!conn->rx_skb)
4665 goto drop;
4667 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4668 skb->len);
4669 conn->rx_len = len - skb->len;
4670 } else {
4671 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4673 if (!conn->rx_len) {
4674 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4675 l2cap_conn_unreliable(conn, ECOMM);
4676 goto drop;
4679 if (skb->len > conn->rx_len) {
4680 BT_ERR("Fragment is too long (len %d, expected %d)",
4681 skb->len, conn->rx_len);
4682 kfree_skb(conn->rx_skb);
4683 conn->rx_skb = NULL;
4684 conn->rx_len = 0;
4685 l2cap_conn_unreliable(conn, ECOMM);
4686 goto drop;
4689 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4690 skb->len);
4691 conn->rx_len -= skb->len;
4693 if (!conn->rx_len) {
4694 /* Complete frame received */
4695 l2cap_recv_frame(conn, conn->rx_skb);
4696 conn->rx_skb = NULL;
4700 drop:
4701 kfree_skb(skb);
4702 return 0;
4705 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4707 struct l2cap_chan *c;
4709 read_lock(&chan_list_lock);
4711 list_for_each_entry(c, &chan_list, global_l) {
4712 struct sock *sk = c->sk;
4714 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4715 batostr(&bt_sk(sk)->src),
4716 batostr(&bt_sk(sk)->dst),
4717 c->state, __le16_to_cpu(c->psm),
4718 c->scid, c->dcid, c->imtu, c->omtu,
4719 c->sec_level, c->mode);
4722 read_unlock(&chan_list_lock);
4724 return 0;
4727 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4729 return single_open(file, l2cap_debugfs_show, inode->i_private);
4732 static const struct file_operations l2cap_debugfs_fops = {
4733 .open = l2cap_debugfs_open,
4734 .read = seq_read,
4735 .llseek = seq_lseek,
4736 .release = single_release,
4739 static struct dentry *l2cap_debugfs;
4741 int __init l2cap_init(void)
4743 int err;
4745 err = l2cap_init_sockets();
4746 if (err < 0)
4747 return err;
4749 if (bt_debugfs) {
4750 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4751 bt_debugfs, NULL, &l2cap_debugfs_fops);
4752 if (!l2cap_debugfs)
4753 BT_ERR("Failed to create L2CAP debug file");
4756 return 0;
4759 void l2cap_exit(void)
4761 debugfs_remove(l2cap_debugfs);
4762 l2cap_cleanup_sockets();
4765 module_param(disable_ertm, bool, 0644);
4766 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");