/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
int disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
				void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
/* ---- L2CAP channels ---- */

/* Channel reference counting: the channel is freed when the last
 * reference is dropped. */
static inline void chan_hold(struct l2cap_chan *c)
{
        atomic_inc(&c->refcnt);
}

static inline void chan_put(struct l2cap_chan *c)
{
        if (atomic_dec_and_test(&c->refcnt))
                kfree(c);
}
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
{
        struct l2cap_chan *c;

        list_for_each_entry(c, &conn->chan_l, list) {
                if (c->dcid == cid)
                        return c;
        }
        return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
        struct l2cap_chan *c;

        list_for_each_entry(c, &conn->chan_l, list) {
                if (c->scid == cid)
                        return c;
        }
        return NULL;
}
/* Find channel with given SCID.
 * Returns locked socket */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
        struct l2cap_chan *c;

        read_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_scid(conn, cid);
        if (c)
                bh_lock_sock(c->sk);
        read_unlock(&conn->chan_lock);
        return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
        struct l2cap_chan *c;

        list_for_each_entry(c, &conn->chan_l, list) {
                if (c->ident == ident)
                        return c;
        }
        return NULL;
}

static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
        struct l2cap_chan *c;

        read_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_ident(conn, ident);
        if (c)
                bh_lock_sock(c->sk);
        read_unlock(&conn->chan_lock);
        return c;
}
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
        struct l2cap_chan *c;

        list_for_each_entry(c, &chan_list, global_l) {
                if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
                        goto found;
        }

        c = NULL;
found:
        return c;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
        int err;

        write_lock_bh(&chan_list_lock);

        if (psm && __l2cap_global_chan_by_addr(psm, src)) {
                err = -EADDRINUSE;
                goto done;
        }

        if (psm) {
                chan->psm = psm;
                chan->sport = psm;
                err = 0;
        } else {
                u16 p;

                /* Dynamically assign an odd PSM in the 0x1001-0x10ff range */
                err = -EINVAL;
                for (p = 0x1001; p < 0x1100; p += 2)
                        if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
                                chan->psm = cpu_to_le16(p);
                                chan->sport = cpu_to_le16(p);
                                err = 0;
                                break;
                        }
        }

done:
        write_unlock_bh(&chan_list_lock);
        return err;
}
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
        write_lock_bh(&chan_list_lock);

        chan->scid = scid;

        write_unlock_bh(&chan_list_lock);

        return 0;
}

/* Pick the first free dynamic CID on this connection */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
        u16 cid = L2CAP_CID_DYN_START;

        for (; cid < L2CAP_CID_DYN_END; cid++) {
                if (!__l2cap_get_chan_by_scid(conn, cid))
                        return cid;
        }

        return 0;
}
static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
{
        BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);

        /* Hold a channel reference while the timer is pending */
        if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
                chan_hold(chan);
}

static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
{
        BT_DBG("chan %p state %d", chan, chan->state);

        if (timer_pending(timer) && del_timer(timer))
                chan_put(chan);
}

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
        chan->state = state;
        chan->ops->state_change(chan->data, state);
}
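/* Channel timer expiry: runs in timer (softirq) context.  If the socket is
 * currently owned by user context, re-arm the timer briefly and try again;
 * otherwise pick an error code based on the channel state, close the channel
 * and drop the reference taken when the timer was set. */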
242 static void l2cap_chan_timeout(unsigned long arg)
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
246 int reason;
248 BT_DBG("chan %p state %d", chan, chan->state);
250 bh_lock_sock(sk);
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, HZ / 5);
255 bh_unlock_sock(sk);
256 chan_put(chan);
257 return;
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
265 else
266 reason = ETIMEDOUT;
268 l2cap_chan_close(chan, reason);
270 bh_unlock_sock(sk);
272 chan->ops->close(chan->data);
273 chan_put(chan);
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 struct l2cap_chan *chan;
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
281 if (!chan)
282 return NULL;
284 chan->sk = sk;
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292 chan->state = BT_OPEN;
294 atomic_set(&chan->refcnt, 1);
296 return chan;
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 write_lock_bh(&chan_list_lock);
302 list_del(&chan->global_l);
303 write_unlock_bh(&chan_list_lock);
305 chan_put(chan);
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid);
313 conn->disc_reason = 0x13;
315 chan->conn = conn;
317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 if (conn->hcon->type == LE_LINK) {
319 /* LE connection */
320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 chan->scid = L2CAP_CID_LE_DATA;
322 chan->dcid = L2CAP_CID_LE_DATA;
323 } else {
324 /* Alloc CID for connection-oriented socket */
325 chan->scid = l2cap_alloc_cid(conn);
326 chan->omtu = L2CAP_DEFAULT_MTU;
328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 /* Connectionless socket */
330 chan->scid = L2CAP_CID_CONN_LESS;
331 chan->dcid = L2CAP_CID_CONN_LESS;
332 chan->omtu = L2CAP_DEFAULT_MTU;
333 } else {
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
340 chan_hold(chan);
342 list_add(&chan->list, &conn->chan_l);
345 /* Delete channel.
346 * Must be called on the locked socket. */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349 struct sock *sk = chan->sk;
350 struct l2cap_conn *conn = chan->conn;
351 struct sock *parent = bt_sk(sk)->parent;
353 __clear_chan_timer(chan);
355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
357 if (conn) {
358 /* Delete from channel list */
359 write_lock_bh(&conn->chan_lock);
360 list_del(&chan->list);
361 write_unlock_bh(&conn->chan_lock);
362 chan_put(chan);
364 chan->conn = NULL;
365 hci_conn_put(conn->hcon);
368 l2cap_state_change(chan, BT_CLOSED);
369 sock_set_flag(sk, SOCK_ZAPPED);
371 if (err)
372 sk->sk_err = err;
374 if (parent) {
375 bt_accept_unlink(sk);
376 parent->sk_data_ready(parent, 0);
377 } else
378 sk->sk_state_change(sk);
380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
382 return;
384 skb_queue_purge(&chan->tx_q);
386 if (chan->mode == L2CAP_MODE_ERTM) {
387 struct srej_list *l, *tmp;
389 __clear_retrans_timer(chan);
390 __clear_monitor_timer(chan);
391 __clear_ack_timer(chan);
393 skb_queue_purge(&chan->srej_q);
395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
396 list_del(&l->list);
397 kfree(l);
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
404 struct sock *sk;
406 BT_DBG("parent %p", parent);
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
412 lock_sock(sk);
413 l2cap_chan_close(chan, ECONNRESET);
414 release_sock(sk);
415 chan->ops->close(chan->data);
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421 struct l2cap_conn *conn = chan->conn;
422 struct sock *sk = chan->sk;
424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426 switch (chan->state) {
427 case BT_LISTEN:
428 l2cap_chan_cleanup_listen(sk);
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
432 break;
434 case BT_CONNECTED:
435 case BT_CONFIG:
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
441 } else
442 l2cap_chan_del(chan, reason);
443 break;
445 case BT_CONNECT2:
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
449 __u16 result;
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
453 else
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
462 sizeof(rsp), &rsp);
465 l2cap_chan_del(chan, reason);
466 break;
468 case BT_CONNECT:
469 case BT_DISCONN:
470 l2cap_chan_del(chan, reason);
471 break;
473 default:
474 sock_set_flag(sk, SOCK_ZAPPED);
475 break;
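/* Map the channel type, PSM and requested security level to the HCI
 * authentication requirements used for the underlying link: raw
 * (signalling-only) channels may ask for dedicated bonding, SDP (PSM 0x0001)
 * never requires bonding, and other channels request general bonding, with
 * MITM protection added for BT_SECURITY_HIGH. */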
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
487 default:
488 return HCI_AT_NO_BONDING;
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
496 else
497 return HCI_AT_NO_BONDING;
498 } else {
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
504 default:
505 return HCI_AT_NO_BONDING;
510 /* Service level security */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
513 struct l2cap_conn *conn = chan->conn;
514 __u8 auth_type;
516 auth_type = l2cap_get_auth_type(chan);
518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
        u8 id;

        /* Get next available identificator.
         *    1 - 128 are used by kernel.
         *  129 - 199 are reserved.
         *  200 - 254 are used by utilities like l2ping, etc.
         */

        spin_lock_bh(&conn->lock);

        if (++conn->tx_ident > 128)
                conn->tx_ident = 1;

        id = conn->tx_ident;

        spin_unlock_bh(&conn->lock);

        return id;
}
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
546 u8 flags;
548 BT_DBG("code 0x%2.2x", code);
550 if (!skb)
551 return;
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
555 else
556 flags = ACL_START;
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560 hci_send_acl(conn->hcon, skb, flags);
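/* Build and send an S-frame (supervisory frame) on this channel: a bare
 * L2CAP header plus the 16-bit control field, optionally followed by a
 * CRC16 FCS.  The F-bit and P-bit are set here if they are pending on the
 * channel. */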
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
565 struct sk_buff *skb;
566 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn;
568 int count, hlen = L2CAP_HDR_SIZE + 2;
569 u8 flags;
571 if (chan->state != BT_CONNECTED)
572 return;
574 if (chan->fcs == L2CAP_FCS_CRC16)
575 hlen += 2;
577 BT_DBG("chan %p, control 0x%2.2x", chan, control);
579 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE;
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL;
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL;
588 skb = bt_skb_alloc(count, GFP_ATOMIC);
589 if (!skb)
590 return;
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2));
597 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 put_unaligned_le16(fcs, skb_put(skb, 2));
602 if (lmp_no_flush_capable(conn->hcon->hdev))
603 flags = ACL_START_NO_FLUSH;
604 else
605 flags = ACL_START;
607 bt_cb(skb)->force_active = chan->force_active;
609 hci_send_acl(chan->conn->hcon, skb, flags);
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
617 } else
618 control |= L2CAP_SUPER_RCV_READY;
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
622 l2cap_send_sframe(chan, control);
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
630 static void l2cap_do_start(struct l2cap_chan *chan)
632 struct l2cap_conn *conn = chan->conn;
634 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
635 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
636 return;
638 if (l2cap_check_security(chan) &&
639 __l2cap_no_conn_pending(chan)) {
640 struct l2cap_conn_req req;
641 req.scid = cpu_to_le16(chan->scid);
642 req.psm = chan->psm;
644 chan->ident = l2cap_get_ident(conn);
645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
648 sizeof(req), &req);
650 } else {
651 struct l2cap_info_req req;
652 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 conn->info_ident = l2cap_get_ident(conn);
657 mod_timer(&conn->info_timer, jiffies +
658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
660 l2cap_send_cmd(conn, conn->info_ident,
661 L2CAP_INFO_REQ, sizeof(req), &req);
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
667 u32 local_feat_mask = l2cap_feat_mask;
668 if (!disable_ertm)
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
671 switch (mode) {
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
676 default:
677 return 0x00;
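/* Queue a Disconnection Request for this channel and move it to BT_DISCONN.
 * Any ERTM timers are stopped first so no retransmissions are attempted on a
 * channel that is going away. */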
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
683 struct sock *sk;
684 struct l2cap_disconn_req req;
686 if (!conn)
687 return;
689 sk = chan->sk;
691 if (chan->mode == L2CAP_MODE_ERTM) {
692 __clear_retrans_timer(chan);
693 __clear_monitor_timer(chan);
694 __clear_ack_timer(chan);
697 req.dcid = cpu_to_le16(chan->dcid);
698 req.scid = cpu_to_le16(chan->scid);
699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 L2CAP_DISCONN_REQ, sizeof(req), &req);
702 l2cap_state_change(chan, BT_DISCONN);
703 sk->sk_err = err;
706 /* ---- L2CAP connections ---- */
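/* Walk all channels on the connection once the feature-mask exchange has
 * completed: send Connection Requests for channels in BT_CONNECT and answer
 * pending incoming requests for channels in BT_CONNECT2. */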
707 static void l2cap_conn_start(struct l2cap_conn *conn)
709 struct l2cap_chan *chan, *tmp;
711 BT_DBG("conn %p", conn);
713 read_lock(&conn->chan_lock);
715 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
718 bh_lock_sock(sk);
720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
721 bh_unlock_sock(sk);
722 continue;
725 if (chan->state == BT_CONNECT) {
726 struct l2cap_conn_req req;
728 if (!l2cap_check_security(chan) ||
729 !__l2cap_no_conn_pending(chan)) {
730 bh_unlock_sock(sk);
731 continue;
734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 && test_bit(CONF_STATE2_DEVICE,
736 &chan->conf_state)) {
737 /* l2cap_chan_close() calls list_del(chan)
738 * so release the lock */
739 read_unlock(&conn->chan_lock);
740 l2cap_chan_close(chan, ECONNRESET);
741 read_lock(&conn->chan_lock);
742 bh_unlock_sock(sk);
743 continue;
746 req.scid = cpu_to_le16(chan->scid);
747 req.psm = chan->psm;
749 chan->ident = l2cap_get_ident(conn);
750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
753 sizeof(req), &req);
755 } else if (chan->state == BT_CONNECT2) {
756 struct l2cap_conn_rsp rsp;
757 char buf[128];
758 rsp.scid = cpu_to_le16(chan->dcid);
759 rsp.dcid = cpu_to_le16(chan->scid);
761 if (l2cap_check_security(chan)) {
762 if (bt_sk(sk)->defer_setup) {
763 struct sock *parent = bt_sk(sk)->parent;
764 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
766 if (parent)
767 parent->sk_data_ready(parent, 0);
769 } else {
770 l2cap_state_change(chan, BT_CONFIG);
771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
774 } else {
775 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
780 sizeof(rsp), &rsp);
782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 rsp.result != L2CAP_CR_SUCCESS) {
784 bh_unlock_sock(sk);
785 continue;
788 set_bit(CONF_REQ_SENT, &chan->conf_state);
789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 l2cap_build_conf_req(chan, buf), buf);
791 chan->num_conf_req++;
794 bh_unlock_sock(sk);
797 read_unlock(&conn->chan_lock);
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked. */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
805 struct l2cap_chan *c, *c1 = NULL;
807 read_lock(&chan_list_lock);
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
812 if (state && c->state != state)
813 continue;
815 if (c->scid == cid) {
816 /* Exact match. */
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
819 return c;
822 /* Closest match */
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
824 c1 = c;
828 read_unlock(&chan_list_lock);
830 return c1;
833 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
835 struct sock *parent, *sk;
836 struct l2cap_chan *chan, *pchan;
838 BT_DBG("");
840 /* Check if we have socket listening on cid */
841 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
842 conn->src);
843 if (!pchan)
844 return;
846 parent = pchan->sk;
848 bh_lock_sock(parent);
850 /* Check for backlog size */
851 if (sk_acceptq_is_full(parent)) {
852 BT_DBG("backlog full %d", parent->sk_ack_backlog);
853 goto clean;
856 chan = pchan->ops->new_connection(pchan->data);
857 if (!chan)
858 goto clean;
860 sk = chan->sk;
862 write_lock_bh(&conn->chan_lock);
864 hci_conn_hold(conn->hcon);
866 bacpy(&bt_sk(sk)->src, conn->src);
867 bacpy(&bt_sk(sk)->dst, conn->dst);
869 bt_accept_enqueue(parent, sk);
871 __l2cap_chan_add(conn, chan);
873 __set_chan_timer(chan, sk->sk_sndtimeo);
875 l2cap_state_change(chan, BT_CONNECTED);
876 parent->sk_data_ready(parent, 0);
878 write_unlock_bh(&conn->chan_lock);
880 clean:
881 bh_unlock_sock(parent);
884 static void l2cap_chan_ready(struct sock *sk)
886 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
887 struct sock *parent = bt_sk(sk)->parent;
889 BT_DBG("sk %p, parent %p", sk, parent);
891 chan->conf_state = 0;
892 __clear_chan_timer(chan);
894 l2cap_state_change(chan, BT_CONNECTED);
895 sk->sk_state_change(sk);
897 if (parent)
898 parent->sk_data_ready(parent, 0);
901 static void l2cap_conn_ready(struct l2cap_conn *conn)
903 struct l2cap_chan *chan;
905 BT_DBG("conn %p", conn);
907 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
908 l2cap_le_conn_ready(conn);
910 if (conn->hcon->out && conn->hcon->type == LE_LINK)
911 smp_conn_security(conn, conn->hcon->pending_sec_level);
913 read_lock(&conn->chan_lock);
915 list_for_each_entry(chan, &conn->chan_l, list) {
916 struct sock *sk = chan->sk;
918 bh_lock_sock(sk);
920 if (conn->hcon->type == LE_LINK) {
921 if (smp_conn_security(conn, chan->sec_level))
922 l2cap_chan_ready(sk);
924 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
925 __clear_chan_timer(chan);
926 l2cap_state_change(chan, BT_CONNECTED);
927 sk->sk_state_change(sk);
929 } else if (chan->state == BT_CONNECT)
930 l2cap_do_start(chan);
932 bh_unlock_sock(sk);
935 read_unlock(&conn->chan_lock);
/* Notify sockets that we cannot guarantee reliability anymore */
939 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
941 struct l2cap_chan *chan;
943 BT_DBG("conn %p", conn);
945 read_lock(&conn->chan_lock);
947 list_for_each_entry(chan, &conn->chan_l, list) {
948 struct sock *sk = chan->sk;
950 if (chan->force_reliable)
951 sk->sk_err = err;
954 read_unlock(&conn->chan_lock);
957 static void l2cap_info_timeout(unsigned long arg)
959 struct l2cap_conn *conn = (void *) arg;
961 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
962 conn->info_ident = 0;
964 l2cap_conn_start(conn);
967 static void l2cap_conn_del(struct hci_conn *hcon, int err)
969 struct l2cap_conn *conn = hcon->l2cap_data;
970 struct l2cap_chan *chan, *l;
971 struct sock *sk;
973 if (!conn)
974 return;
976 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
978 kfree_skb(conn->rx_skb);
980 /* Kill channels */
981 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
982 sk = chan->sk;
983 bh_lock_sock(sk);
984 l2cap_chan_del(chan, err);
985 bh_unlock_sock(sk);
986 chan->ops->close(chan->data);
989 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
990 del_timer_sync(&conn->info_timer);
992 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
993 del_timer(&conn->security_timer);
994 hci_conn_put(hcon);
997 hcon->l2cap_data = NULL;
998 kfree(conn);
1001 static void security_timeout(unsigned long arg)
1003 struct l2cap_conn *conn = (void *) arg;
1005 l2cap_conn_del(conn->hcon, ETIMEDOUT);
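/* Allocate and initialise the L2CAP connection object for an HCI connection
 * (unless one already exists or the HCI connect failed), choosing the MTU
 * from the LE or ACL buffer size and setting up either the SMP security
 * timer or the information-request timer. */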
1008 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1010 struct l2cap_conn *conn = hcon->l2cap_data;
1012 if (conn || status)
1013 return conn;
1015 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1016 if (!conn)
1017 return NULL;
1019 hcon->l2cap_data = conn;
1020 conn->hcon = hcon;
1022 BT_DBG("hcon %p conn %p", hcon, conn);
1024 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1025 conn->mtu = hcon->hdev->le_mtu;
1026 else
1027 conn->mtu = hcon->hdev->acl_mtu;
1029 conn->src = &hcon->hdev->bdaddr;
1030 conn->dst = &hcon->dst;
1032 conn->feat_mask = 0;
1034 spin_lock_init(&conn->lock);
1035 rwlock_init(&conn->chan_lock);
1037 INIT_LIST_HEAD(&conn->chan_l);
1039 if (hcon->type == LE_LINK)
1040 setup_timer(&conn->security_timer, security_timeout,
1041 (unsigned long) conn);
1042 else
1043 setup_timer(&conn->info_timer, l2cap_info_timeout,
1044 (unsigned long) conn);
1046 conn->disc_reason = 0x13;
1048 return conn;
1051 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1053 write_lock_bh(&conn->chan_lock);
1054 __l2cap_chan_add(conn, chan);
1055 write_unlock_bh(&conn->chan_lock);
1058 /* ---- Socket interface ---- */
/* Find socket with psm and source bdaddr.
 * Returns closest match. */
1063 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1065 struct l2cap_chan *c, *c1 = NULL;
1067 read_lock(&chan_list_lock);
1069 list_for_each_entry(c, &chan_list, global_l) {
1070 struct sock *sk = c->sk;
1072 if (state && c->state != state)
1073 continue;
1075 if (c->psm == psm) {
1076 /* Exact match. */
1077 if (!bacmp(&bt_sk(sk)->src, src)) {
1078 read_unlock(&chan_list_lock);
1079 return c;
1082 /* Closest match */
1083 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1084 c1 = c;
1088 read_unlock(&chan_list_lock);
1090 return c1;
1093 int l2cap_chan_connect(struct l2cap_chan *chan)
1095 struct sock *sk = chan->sk;
1096 bdaddr_t *src = &bt_sk(sk)->src;
1097 bdaddr_t *dst = &bt_sk(sk)->dst;
1098 struct l2cap_conn *conn;
1099 struct hci_conn *hcon;
1100 struct hci_dev *hdev;
1101 __u8 auth_type;
1102 int err;
1104 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1105 chan->psm);
1107 hdev = hci_get_route(dst, src);
1108 if (!hdev)
1109 return -EHOSTUNREACH;
1111 hci_dev_lock_bh(hdev);
1113 auth_type = l2cap_get_auth_type(chan);
1115 if (chan->dcid == L2CAP_CID_LE_DATA)
1116 hcon = hci_connect(hdev, LE_LINK, dst,
1117 chan->sec_level, auth_type);
1118 else
1119 hcon = hci_connect(hdev, ACL_LINK, dst,
1120 chan->sec_level, auth_type);
1122 if (IS_ERR(hcon)) {
1123 err = PTR_ERR(hcon);
1124 goto done;
1127 conn = l2cap_conn_add(hcon, 0);
1128 if (!conn) {
1129 hci_conn_put(hcon);
1130 err = -ENOMEM;
1131 goto done;
1134 /* Update source addr of the socket */
1135 bacpy(src, conn->src);
1137 l2cap_chan_add(conn, chan);
1139 l2cap_state_change(chan, BT_CONNECT);
1140 __set_chan_timer(chan, sk->sk_sndtimeo);
1142 if (hcon->state == BT_CONNECTED) {
1143 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1144 __clear_chan_timer(chan);
1145 if (l2cap_check_security(chan))
1146 l2cap_state_change(chan, BT_CONNECTED);
1147 } else
1148 l2cap_do_start(chan);
1151 err = 0;
1153 done:
1154 hci_dev_unlock_bh(hdev);
1155 hci_dev_put(hdev);
1156 return err;
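/* Wait (interruptibly) until all outstanding ERTM I-frames have been
 * acknowledged by the remote side, releasing the socket lock while
 * sleeping. */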
1159 int __l2cap_wait_ack(struct sock *sk)
1161 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1162 DECLARE_WAITQUEUE(wait, current);
1163 int err = 0;
1164 int timeo = HZ/5;
1166 add_wait_queue(sk_sleep(sk), &wait);
1167 set_current_state(TASK_INTERRUPTIBLE);
1168 while (chan->unacked_frames > 0 && chan->conn) {
1169 if (!timeo)
1170 timeo = HZ/5;
1172 if (signal_pending(current)) {
1173 err = sock_intr_errno(timeo);
1174 break;
1177 release_sock(sk);
1178 timeo = schedule_timeout(timeo);
1179 lock_sock(sk);
1180 set_current_state(TASK_INTERRUPTIBLE);
1182 err = sock_error(sk);
1183 if (err)
1184 break;
1186 set_current_state(TASK_RUNNING);
1187 remove_wait_queue(sk_sleep(sk), &wait);
1188 return err;
1191 static void l2cap_monitor_timeout(unsigned long arg)
1193 struct l2cap_chan *chan = (void *) arg;
1194 struct sock *sk = chan->sk;
1196 BT_DBG("chan %p", chan);
1198 bh_lock_sock(sk);
1199 if (chan->retry_count >= chan->remote_max_tx) {
1200 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1201 bh_unlock_sock(sk);
1202 return;
1205 chan->retry_count++;
1206 __set_monitor_timer(chan);
1208 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1209 bh_unlock_sock(sk);
1212 static void l2cap_retrans_timeout(unsigned long arg)
1214 struct l2cap_chan *chan = (void *) arg;
1215 struct sock *sk = chan->sk;
1217 BT_DBG("chan %p", chan);
1219 bh_lock_sock(sk);
1220 chan->retry_count = 1;
1221 __set_monitor_timer(chan);
1223 set_bit(CONN_WAIT_F, &chan->conn_state);
1225 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1226 bh_unlock_sock(sk);
1229 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1231 struct sk_buff *skb;
1233 while ((skb = skb_peek(&chan->tx_q)) &&
1234 chan->unacked_frames) {
1235 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1236 break;
1238 skb = skb_dequeue(&chan->tx_q);
1239 kfree_skb(skb);
1241 chan->unacked_frames--;
1244 if (!chan->unacked_frames)
1245 __clear_retrans_timer(chan);
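/* Hand a fully built PDU to the HCI layer, marking it non-flushable when the
 * controller supports that and the channel has not asked for flushable
 * packets. */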
1248 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1250 struct hci_conn *hcon = chan->conn->hcon;
1251 u16 flags;
1253 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1255 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1256 flags = ACL_START_NO_FLUSH;
1257 else
1258 flags = ACL_START;
1260 bt_cb(skb)->force_active = chan->force_active;
1261 hci_send_acl(hcon, skb, flags);
1264 static void l2cap_streaming_send(struct l2cap_chan *chan)
1266 struct sk_buff *skb;
1267 u16 control, fcs;
1269 while ((skb = skb_dequeue(&chan->tx_q))) {
1270 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1271 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1272 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1274 if (chan->fcs == L2CAP_FCS_CRC16) {
1275 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1276 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1279 l2cap_do_send(chan, skb);
1281 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1285 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1287 struct sk_buff *skb, *tx_skb;
1288 u16 control, fcs;
1290 skb = skb_peek(&chan->tx_q);
1291 if (!skb)
1292 return;
1294 do {
1295 if (bt_cb(skb)->tx_seq == tx_seq)
1296 break;
1298 if (skb_queue_is_last(&chan->tx_q, skb))
1299 return;
1301 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1303 if (chan->remote_max_tx &&
1304 bt_cb(skb)->retries == chan->remote_max_tx) {
1305 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1306 return;
1309 tx_skb = skb_clone(skb, GFP_ATOMIC);
1310 bt_cb(skb)->retries++;
1311 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1312 control &= L2CAP_CTRL_SAR;
1314 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1315 control |= L2CAP_CTRL_FINAL;
1317 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1318 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1320 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1322 if (chan->fcs == L2CAP_FCS_CRC16) {
1323 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1324 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1327 l2cap_do_send(chan, tx_skb);
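/* Transmit queued I-frames while the transmit window is open: each frame is
 * cloned, stamped with the current req/tx sequence numbers (plus FCS if
 * enabled) and sent, the retransmission timer is restarted and the unacked
 * frame count is updated.  Returns the number of frames sent, or a negative
 * error if the channel is not connected. */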
1330 static int l2cap_ertm_send(struct l2cap_chan *chan)
1332 struct sk_buff *skb, *tx_skb;
1333 u16 control, fcs;
1334 int nsent = 0;
1336 if (chan->state != BT_CONNECTED)
1337 return -ENOTCONN;
1339 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1341 if (chan->remote_max_tx &&
1342 bt_cb(skb)->retries == chan->remote_max_tx) {
1343 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1344 break;
1347 tx_skb = skb_clone(skb, GFP_ATOMIC);
1349 bt_cb(skb)->retries++;
1351 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1352 control &= L2CAP_CTRL_SAR;
1354 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1355 control |= L2CAP_CTRL_FINAL;
1357 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1358 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1359 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1362 if (chan->fcs == L2CAP_FCS_CRC16) {
1363 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1364 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1367 l2cap_do_send(chan, tx_skb);
1369 __set_retrans_timer(chan);
1371 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1372 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1374 if (bt_cb(skb)->retries == 1)
1375 chan->unacked_frames++;
1377 chan->frames_sent++;
1379 if (skb_queue_is_last(&chan->tx_q, skb))
1380 chan->tx_send_head = NULL;
1381 else
1382 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1384 nsent++;
1387 return nsent;
1390 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1392 int ret;
1394 if (!skb_queue_empty(&chan->tx_q))
1395 chan->tx_send_head = chan->tx_q.next;
1397 chan->next_tx_seq = chan->expected_ack_seq;
1398 ret = l2cap_ertm_send(chan);
1399 return ret;
1402 static void l2cap_send_ack(struct l2cap_chan *chan)
1404 u16 control = 0;
1406 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1408 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1409 control |= L2CAP_SUPER_RCV_NOT_READY;
1410 set_bit(CONN_RNR_SENT, &chan->conn_state);
1411 l2cap_send_sframe(chan, control);
1412 return;
1415 if (l2cap_ertm_send(chan) > 0)
1416 return;
1418 control |= L2CAP_SUPER_RCV_READY;
1419 l2cap_send_sframe(chan, control);
1422 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1424 struct srej_list *tail;
1425 u16 control;
1427 control = L2CAP_SUPER_SELECT_REJECT;
1428 control |= L2CAP_CTRL_FINAL;
1430 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1431 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1433 l2cap_send_sframe(chan, control);
1436 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1438 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1439 struct sk_buff **frag;
1440 int err, sent = 0;
1442 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1443 return -EFAULT;
1445 sent += count;
1446 len -= count;
1448 /* Continuation fragments (no L2CAP header) */
1449 frag = &skb_shinfo(skb)->frag_list;
1450 while (len) {
1451 count = min_t(unsigned int, conn->mtu, len);
1453 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1454 if (!*frag)
1455 return err;
1456 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1457 return -EFAULT;
1459 sent += count;
1460 len -= count;
1462 frag = &(*frag)->next;
1465 return sent;
1468 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1470 struct sock *sk = chan->sk;
1471 struct l2cap_conn *conn = chan->conn;
1472 struct sk_buff *skb;
1473 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1474 struct l2cap_hdr *lh;
1476 BT_DBG("sk %p len %d", sk, (int)len);
1478 count = min_t(unsigned int, (conn->mtu - hlen), len);
1479 skb = bt_skb_send_alloc(sk, count + hlen,
1480 msg->msg_flags & MSG_DONTWAIT, &err);
1481 if (!skb)
1482 return ERR_PTR(err);
1484 /* Create L2CAP header */
1485 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1486 lh->cid = cpu_to_le16(chan->dcid);
1487 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1488 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1490 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1491 if (unlikely(err < 0)) {
1492 kfree_skb(skb);
1493 return ERR_PTR(err);
1495 return skb;
1498 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1500 struct sock *sk = chan->sk;
1501 struct l2cap_conn *conn = chan->conn;
1502 struct sk_buff *skb;
1503 int err, count, hlen = L2CAP_HDR_SIZE;
1504 struct l2cap_hdr *lh;
1506 BT_DBG("sk %p len %d", sk, (int)len);
1508 count = min_t(unsigned int, (conn->mtu - hlen), len);
1509 skb = bt_skb_send_alloc(sk, count + hlen,
1510 msg->msg_flags & MSG_DONTWAIT, &err);
1511 if (!skb)
1512 return ERR_PTR(err);
1514 /* Create L2CAP header */
1515 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1516 lh->cid = cpu_to_le16(chan->dcid);
1517 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1519 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1520 if (unlikely(err < 0)) {
1521 kfree_skb(skb);
1522 return ERR_PTR(err);
1524 return skb;
1527 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1529 struct sock *sk = chan->sk;
1530 struct l2cap_conn *conn = chan->conn;
1531 struct sk_buff *skb;
1532 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1533 struct l2cap_hdr *lh;
1535 BT_DBG("sk %p len %d", sk, (int)len);
1537 if (!conn)
1538 return ERR_PTR(-ENOTCONN);
1540 if (sdulen)
1541 hlen += 2;
1543 if (chan->fcs == L2CAP_FCS_CRC16)
1544 hlen += 2;
1546 count = min_t(unsigned int, (conn->mtu - hlen), len);
1547 skb = bt_skb_send_alloc(sk, count + hlen,
1548 msg->msg_flags & MSG_DONTWAIT, &err);
1549 if (!skb)
1550 return ERR_PTR(err);
1552 /* Create L2CAP header */
1553 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1554 lh->cid = cpu_to_le16(chan->dcid);
1555 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1556 put_unaligned_le16(control, skb_put(skb, 2));
1557 if (sdulen)
1558 put_unaligned_le16(sdulen, skb_put(skb, 2));
1560 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1561 if (unlikely(err < 0)) {
1562 kfree_skb(skb);
1563 return ERR_PTR(err);
1566 if (chan->fcs == L2CAP_FCS_CRC16)
1567 put_unaligned_le16(0, skb_put(skb, 2));
1569 bt_cb(skb)->retries = 0;
1570 return skb;
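/* Segment an SDU that does not fit into a single PDU: build a start I-frame
 * carrying the total SDU length, then continuation frames, and finally an
 * end frame, queueing them all on the channel's transmit queue. */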
1573 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1575 struct sk_buff *skb;
1576 struct sk_buff_head sar_queue;
1577 u16 control;
1578 size_t size = 0;
1580 skb_queue_head_init(&sar_queue);
1581 control = L2CAP_SDU_START;
1582 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1583 if (IS_ERR(skb))
1584 return PTR_ERR(skb);
1586 __skb_queue_tail(&sar_queue, skb);
1587 len -= chan->remote_mps;
1588 size += chan->remote_mps;
1590 while (len > 0) {
1591 size_t buflen;
1593 if (len > chan->remote_mps) {
1594 control = L2CAP_SDU_CONTINUE;
1595 buflen = chan->remote_mps;
1596 } else {
1597 control = L2CAP_SDU_END;
1598 buflen = len;
1601 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1602 if (IS_ERR(skb)) {
1603 skb_queue_purge(&sar_queue);
1604 return PTR_ERR(skb);
1607 __skb_queue_tail(&sar_queue, skb);
1608 len -= buflen;
1609 size += buflen;
1611 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1612 if (chan->tx_send_head == NULL)
1613 chan->tx_send_head = sar_queue.next;
1615 return size;
1618 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1620 struct sk_buff *skb;
1621 u16 control;
1622 int err;
1624 /* Connectionless channel */
1625 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1626 skb = l2cap_create_connless_pdu(chan, msg, len);
1627 if (IS_ERR(skb))
1628 return PTR_ERR(skb);
1630 l2cap_do_send(chan, skb);
1631 return len;
1634 switch (chan->mode) {
1635 case L2CAP_MODE_BASIC:
1636 /* Check outgoing MTU */
1637 if (len > chan->omtu)
1638 return -EMSGSIZE;
1640 /* Create a basic PDU */
1641 skb = l2cap_create_basic_pdu(chan, msg, len);
1642 if (IS_ERR(skb))
1643 return PTR_ERR(skb);
1645 l2cap_do_send(chan, skb);
1646 err = len;
1647 break;
1649 case L2CAP_MODE_ERTM:
1650 case L2CAP_MODE_STREAMING:
1651 /* Entire SDU fits into one PDU */
1652 if (len <= chan->remote_mps) {
1653 control = L2CAP_SDU_UNSEGMENTED;
1654 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1656 if (IS_ERR(skb))
1657 return PTR_ERR(skb);
1659 __skb_queue_tail(&chan->tx_q, skb);
1661 if (chan->tx_send_head == NULL)
1662 chan->tx_send_head = skb;
1664 } else {
1665 /* Segment SDU into multiples PDUs */
1666 err = l2cap_sar_segment_sdu(chan, msg, len);
1667 if (err < 0)
1668 return err;
1671 if (chan->mode == L2CAP_MODE_STREAMING) {
1672 l2cap_streaming_send(chan);
1673 err = len;
1674 break;
1677 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1678 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1679 err = len;
1680 break;
1683 err = l2cap_ertm_send(chan);
1684 if (err >= 0)
1685 err = len;
1687 break;
1689 default:
1690 BT_DBG("bad state %1.1x", chan->mode);
1691 err = -EBADFD;
1694 return err;
1697 /* Copy frame to all raw sockets on that connection */
1698 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1700 struct sk_buff *nskb;
1701 struct l2cap_chan *chan;
1703 BT_DBG("conn %p", conn);
1705 read_lock(&conn->chan_lock);
1706 list_for_each_entry(chan, &conn->chan_l, list) {
1707 struct sock *sk = chan->sk;
1708 if (chan->chan_type != L2CAP_CHAN_RAW)
1709 continue;
1711 /* Don't send frame to the socket it came from */
1712 if (skb->sk == sk)
1713 continue;
1714 nskb = skb_clone(skb, GFP_ATOMIC);
1715 if (!nskb)
1716 continue;
1718 if (chan->ops->recv(chan->data, nskb))
1719 kfree_skb(nskb);
1721 read_unlock(&conn->chan_lock);
1724 /* ---- L2CAP signalling commands ---- */
1725 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1726 u8 code, u8 ident, u16 dlen, void *data)
1728 struct sk_buff *skb, **frag;
1729 struct l2cap_cmd_hdr *cmd;
1730 struct l2cap_hdr *lh;
1731 int len, count;
1733 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1734 conn, code, ident, dlen);
1736 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1737 count = min_t(unsigned int, conn->mtu, len);
1739 skb = bt_skb_alloc(count, GFP_ATOMIC);
1740 if (!skb)
1741 return NULL;
1743 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1744 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1746 if (conn->hcon->type == LE_LINK)
1747 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1748 else
1749 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1751 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1752 cmd->code = code;
1753 cmd->ident = ident;
1754 cmd->len = cpu_to_le16(dlen);
1756 if (dlen) {
1757 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1758 memcpy(skb_put(skb, count), data, count);
1759 data += count;
1762 len -= skb->len;
1764 /* Continuation fragments (no L2CAP header) */
1765 frag = &skb_shinfo(skb)->frag_list;
1766 while (len) {
1767 count = min_t(unsigned int, conn->mtu, len);
1769 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1770 if (!*frag)
1771 goto fail;
1773 memcpy(skb_put(*frag, count), data, count);
1775 len -= count;
1776 data += count;
1778 frag = &(*frag)->next;
1781 return skb;
1783 fail:
1784 kfree_skb(skb);
1785 return NULL;
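/* Configuration options are encoded as type/length/value triplets.  The
 * helpers below step through such a list: l2cap_get_conf_opt() decodes the
 * next option and advances the cursor, l2cap_add_conf_opt() appends one to
 * the request or response being built. */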
1788 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1790 struct l2cap_conf_opt *opt = *ptr;
1791 int len;
1793 len = L2CAP_CONF_OPT_SIZE + opt->len;
1794 *ptr += len;
1796 *type = opt->type;
1797 *olen = opt->len;
1799 switch (opt->len) {
1800 case 1:
1801 *val = *((u8 *) opt->val);
1802 break;
1804 case 2:
1805 *val = get_unaligned_le16(opt->val);
1806 break;
1808 case 4:
1809 *val = get_unaligned_le32(opt->val);
1810 break;
1812 default:
1813 *val = (unsigned long) opt->val;
1814 break;
1817 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1818 return len;
1821 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1823 struct l2cap_conf_opt *opt = *ptr;
1825 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1827 opt->type = type;
1828 opt->len = len;
1830 switch (len) {
1831 case 1:
1832 *((u8 *) opt->val) = val;
1833 break;
1835 case 2:
1836 put_unaligned_le16(val, opt->val);
1837 break;
1839 case 4:
1840 put_unaligned_le32(val, opt->val);
1841 break;
1843 default:
1844 memcpy(opt->val, (void *) val, len);
1845 break;
1848 *ptr += L2CAP_CONF_OPT_SIZE + len;
1851 static void l2cap_ack_timeout(unsigned long arg)
1853 struct l2cap_chan *chan = (void *) arg;
1855 bh_lock_sock(chan->sk);
1856 l2cap_send_ack(chan);
1857 bh_unlock_sock(chan->sk);
1860 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1862 struct sock *sk = chan->sk;
1864 chan->expected_ack_seq = 0;
1865 chan->unacked_frames = 0;
1866 chan->buffer_seq = 0;
1867 chan->num_acked = 0;
1868 chan->frames_sent = 0;
1870 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1871 (unsigned long) chan);
1872 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1873 (unsigned long) chan);
1874 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1876 skb_queue_head_init(&chan->srej_q);
1878 INIT_LIST_HEAD(&chan->srej_l);
1881 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1884 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1886 switch (mode) {
1887 case L2CAP_MODE_STREAMING:
1888 case L2CAP_MODE_ERTM:
1889 if (l2cap_mode_supported(mode, remote_feat_mask))
1890 return mode;
1891 /* fall through */
1892 default:
1893 return L2CAP_MODE_BASIC;
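/* Build the Configuration Request sent to the peer for this channel: pick
 * the channel mode (falling back to Basic mode when ERTM/streaming is not
 * supported by both sides) and add MTU, RFC and FCS options as needed. */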
1897 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1899 struct l2cap_conf_req *req = data;
1900 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1901 void *ptr = req->data;
1903 BT_DBG("chan %p", chan);
1905 if (chan->num_conf_req || chan->num_conf_rsp)
1906 goto done;
1908 switch (chan->mode) {
1909 case L2CAP_MODE_STREAMING:
1910 case L2CAP_MODE_ERTM:
1911 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1912 break;
1914 /* fall through */
1915 default:
1916 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1917 break;
1920 done:
1921 if (chan->imtu != L2CAP_DEFAULT_MTU)
1922 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1924 switch (chan->mode) {
1925 case L2CAP_MODE_BASIC:
1926 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1927 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1928 break;
1930 rfc.mode = L2CAP_MODE_BASIC;
1931 rfc.txwin_size = 0;
1932 rfc.max_transmit = 0;
1933 rfc.retrans_timeout = 0;
1934 rfc.monitor_timeout = 0;
1935 rfc.max_pdu_size = 0;
1937 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1938 (unsigned long) &rfc);
1939 break;
1941 case L2CAP_MODE_ERTM:
1942 rfc.mode = L2CAP_MODE_ERTM;
1943 rfc.txwin_size = chan->tx_win;
1944 rfc.max_transmit = chan->max_tx;
1945 rfc.retrans_timeout = 0;
1946 rfc.monitor_timeout = 0;
1947 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1948 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1949 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1951 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1952 (unsigned long) &rfc);
1954 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1955 break;
1957 if (chan->fcs == L2CAP_FCS_NONE ||
1958 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1959 chan->fcs = L2CAP_FCS_NONE;
1960 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1962 break;
1964 case L2CAP_MODE_STREAMING:
1965 rfc.mode = L2CAP_MODE_STREAMING;
1966 rfc.txwin_size = 0;
1967 rfc.max_transmit = 0;
1968 rfc.retrans_timeout = 0;
1969 rfc.monitor_timeout = 0;
1970 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1971 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1972 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1974 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1975 (unsigned long) &rfc);
1977 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1978 break;
1980 if (chan->fcs == L2CAP_FCS_NONE ||
1981 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1982 chan->fcs = L2CAP_FCS_NONE;
1983 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1985 break;
1988 req->dcid = cpu_to_le16(chan->dcid);
1989 req->flags = cpu_to_le16(0);
1991 return ptr - data;
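/* Parse the peer's Configuration Request (already accumulated in
 * chan->conf_req) and build our Configuration Response, accepting or
 * rejecting the proposed MTU, mode and FCS settings. */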
1994 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1996 struct l2cap_conf_rsp *rsp = data;
1997 void *ptr = rsp->data;
1998 void *req = chan->conf_req;
1999 int len = chan->conf_len;
2000 int type, hint, olen;
2001 unsigned long val;
2002 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2003 u16 mtu = L2CAP_DEFAULT_MTU;
2004 u16 result = L2CAP_CONF_SUCCESS;
2006 BT_DBG("chan %p", chan);
2008 while (len >= L2CAP_CONF_OPT_SIZE) {
2009 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2011 hint = type & L2CAP_CONF_HINT;
2012 type &= L2CAP_CONF_MASK;
2014 switch (type) {
2015 case L2CAP_CONF_MTU:
2016 mtu = val;
2017 break;
2019 case L2CAP_CONF_FLUSH_TO:
2020 chan->flush_to = val;
2021 break;
2023 case L2CAP_CONF_QOS:
2024 break;
2026 case L2CAP_CONF_RFC:
2027 if (olen == sizeof(rfc))
2028 memcpy(&rfc, (void *) val, olen);
2029 break;
2031 case L2CAP_CONF_FCS:
2032 if (val == L2CAP_FCS_NONE)
2033 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2035 break;
2037 default:
2038 if (hint)
2039 break;
2041 result = L2CAP_CONF_UNKNOWN;
2042 *((u8 *) ptr++) = type;
2043 break;
2047 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2048 goto done;
2050 switch (chan->mode) {
2051 case L2CAP_MODE_STREAMING:
2052 case L2CAP_MODE_ERTM:
2053 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2054 chan->mode = l2cap_select_mode(rfc.mode,
2055 chan->conn->feat_mask);
2056 break;
2059 if (chan->mode != rfc.mode)
2060 return -ECONNREFUSED;
2062 break;
2065 done:
2066 if (chan->mode != rfc.mode) {
2067 result = L2CAP_CONF_UNACCEPT;
2068 rfc.mode = chan->mode;
2070 if (chan->num_conf_rsp == 1)
2071 return -ECONNREFUSED;
2073 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2074 sizeof(rfc), (unsigned long) &rfc);
2078 if (result == L2CAP_CONF_SUCCESS) {
2079 /* Configure output options and let the other side know
2080 * which ones we don't like. */
2082 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2083 result = L2CAP_CONF_UNACCEPT;
2084 else {
2085 chan->omtu = mtu;
2086 set_bit(CONF_MTU_DONE, &chan->conf_state);
2088 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2090 switch (rfc.mode) {
2091 case L2CAP_MODE_BASIC:
2092 chan->fcs = L2CAP_FCS_NONE;
2093 set_bit(CONF_MODE_DONE, &chan->conf_state);
2094 break;
2096 case L2CAP_MODE_ERTM:
2097 chan->remote_tx_win = rfc.txwin_size;
2098 chan->remote_max_tx = rfc.max_transmit;
2100 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2101 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2103 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2105 rfc.retrans_timeout =
2106 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2107 rfc.monitor_timeout =
2108 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2110 set_bit(CONF_MODE_DONE, &chan->conf_state);
2112 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2113 sizeof(rfc), (unsigned long) &rfc);
2115 break;
2117 case L2CAP_MODE_STREAMING:
2118 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2119 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2121 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2123 set_bit(CONF_MODE_DONE, &chan->conf_state);
2125 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2126 sizeof(rfc), (unsigned long) &rfc);
2128 break;
2130 default:
2131 result = L2CAP_CONF_UNACCEPT;
2133 memset(&rfc, 0, sizeof(rfc));
2134 rfc.mode = chan->mode;
2137 if (result == L2CAP_CONF_SUCCESS)
2138 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2140 rsp->scid = cpu_to_le16(chan->dcid);
2141 rsp->result = cpu_to_le16(result);
2142 rsp->flags = cpu_to_le16(0x0000);
2144 return ptr - data;
2147 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2149 struct l2cap_conf_req *req = data;
2150 void *ptr = req->data;
2151 int type, olen;
2152 unsigned long val;
2153 struct l2cap_conf_rfc rfc;
2155 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2157 while (len >= L2CAP_CONF_OPT_SIZE) {
2158 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2160 switch (type) {
2161 case L2CAP_CONF_MTU:
2162 if (val < L2CAP_DEFAULT_MIN_MTU) {
2163 *result = L2CAP_CONF_UNACCEPT;
2164 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2165 } else
2166 chan->imtu = val;
2167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2168 break;
2170 case L2CAP_CONF_FLUSH_TO:
2171 chan->flush_to = val;
2172 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2173 2, chan->flush_to);
2174 break;
2176 case L2CAP_CONF_RFC:
2177 if (olen == sizeof(rfc))
2178 memcpy(&rfc, (void *)val, olen);
2180 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2181 rfc.mode != chan->mode)
2182 return -ECONNREFUSED;
2184 chan->fcs = 0;
2186 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2187 sizeof(rfc), (unsigned long) &rfc);
2188 break;
2192 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2193 return -ECONNREFUSED;
2195 chan->mode = rfc.mode;
2197 if (*result == L2CAP_CONF_SUCCESS) {
2198 switch (rfc.mode) {
2199 case L2CAP_MODE_ERTM:
2200 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2201 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2202 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2203 break;
2204 case L2CAP_MODE_STREAMING:
2205 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2209 req->dcid = cpu_to_le16(chan->dcid);
2210 req->flags = cpu_to_le16(0x0000);
2212 return ptr - data;
2215 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2217 struct l2cap_conf_rsp *rsp = data;
2218 void *ptr = rsp->data;
2220 BT_DBG("chan %p", chan);
2222 rsp->scid = cpu_to_le16(chan->dcid);
2223 rsp->result = cpu_to_le16(result);
2224 rsp->flags = cpu_to_le16(flags);
2226 return ptr - data;
2229 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2231 struct l2cap_conn_rsp rsp;
2232 struct l2cap_conn *conn = chan->conn;
2233 u8 buf[128];
2235 rsp.scid = cpu_to_le16(chan->dcid);
2236 rsp.dcid = cpu_to_le16(chan->scid);
2237 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2238 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2239 l2cap_send_cmd(conn, chan->ident,
2240 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2242 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2243 return;
2245 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2246 l2cap_build_conf_req(chan, buf), buf);
2247 chan->num_conf_req++;
2250 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2252 int type, olen;
2253 unsigned long val;
2254 struct l2cap_conf_rfc rfc;
2256 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2258 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2259 return;
2261 while (len >= L2CAP_CONF_OPT_SIZE) {
2262 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2264 switch (type) {
2265 case L2CAP_CONF_RFC:
2266 if (olen == sizeof(rfc))
2267 memcpy(&rfc, (void *)val, olen);
2268 goto done;
2272 done:
2273 switch (rfc.mode) {
2274 case L2CAP_MODE_ERTM:
2275 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2276 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2277 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2278 break;
2279 case L2CAP_MODE_STREAMING:
2280 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2284 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2286 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2288 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2289 return 0;
2291 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2292 cmd->ident == conn->info_ident) {
2293 del_timer(&conn->info_timer);
2295 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2296 conn->info_ident = 0;
2298 l2cap_conn_start(conn);
2301 return 0;
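/* Handle an incoming Connection Request: look up a listening channel for the
 * PSM, apply security checks, create the new channel and reply with a
 * success, pending or error result. */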
2304 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2306 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2307 struct l2cap_conn_rsp rsp;
2308 struct l2cap_chan *chan = NULL, *pchan;
2309 struct sock *parent, *sk = NULL;
2310 int result, status = L2CAP_CS_NO_INFO;
2312 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2313 __le16 psm = req->psm;
2315 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2317 /* Check if we have socket listening on psm */
2318 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2319 if (!pchan) {
2320 result = L2CAP_CR_BAD_PSM;
2321 goto sendresp;
2324 parent = pchan->sk;
2326 bh_lock_sock(parent);
2328 /* Check if the ACL is secure enough (if not SDP) */
2329 if (psm != cpu_to_le16(0x0001) &&
2330 !hci_conn_check_link_mode(conn->hcon)) {
2331 conn->disc_reason = 0x05;
2332 result = L2CAP_CR_SEC_BLOCK;
2333 goto response;
2336 result = L2CAP_CR_NO_MEM;
2338 /* Check for backlog size */
2339 if (sk_acceptq_is_full(parent)) {
2340 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2341 goto response;
2344 chan = pchan->ops->new_connection(pchan->data);
2345 if (!chan)
2346 goto response;
2348 sk = chan->sk;
2350 write_lock_bh(&conn->chan_lock);
2352 /* Check if we already have channel with that dcid */
2353 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2354 write_unlock_bh(&conn->chan_lock);
2355 sock_set_flag(sk, SOCK_ZAPPED);
2356 chan->ops->close(chan->data);
2357 goto response;
2360 hci_conn_hold(conn->hcon);
2362 bacpy(&bt_sk(sk)->src, conn->src);
2363 bacpy(&bt_sk(sk)->dst, conn->dst);
2364 chan->psm = psm;
2365 chan->dcid = scid;
2367 bt_accept_enqueue(parent, sk);
2369 __l2cap_chan_add(conn, chan);
2371 dcid = chan->scid;
2373 __set_chan_timer(chan, sk->sk_sndtimeo);
2375 chan->ident = cmd->ident;
2377 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2378 if (l2cap_check_security(chan)) {
2379 if (bt_sk(sk)->defer_setup) {
2380 l2cap_state_change(chan, BT_CONNECT2);
2381 result = L2CAP_CR_PEND;
2382 status = L2CAP_CS_AUTHOR_PEND;
2383 parent->sk_data_ready(parent, 0);
2384 } else {
2385 l2cap_state_change(chan, BT_CONFIG);
2386 result = L2CAP_CR_SUCCESS;
2387 status = L2CAP_CS_NO_INFO;
2389 } else {
2390 l2cap_state_change(chan, BT_CONNECT2);
2391 result = L2CAP_CR_PEND;
2392 status = L2CAP_CS_AUTHEN_PEND;
2394 } else {
2395 l2cap_state_change(chan, BT_CONNECT2);
2396 result = L2CAP_CR_PEND;
2397 status = L2CAP_CS_NO_INFO;
2400 write_unlock_bh(&conn->chan_lock);
2402 response:
2403 bh_unlock_sock(parent);
2405 sendresp:
2406 rsp.scid = cpu_to_le16(scid);
2407 rsp.dcid = cpu_to_le16(dcid);
2408 rsp.result = cpu_to_le16(result);
2409 rsp.status = cpu_to_le16(status);
2410 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2412 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2413 struct l2cap_info_req info;
2414 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2416 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2417 conn->info_ident = l2cap_get_ident(conn);
2419 mod_timer(&conn->info_timer, jiffies +
2420 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2422 l2cap_send_cmd(conn, conn->info_ident,
2423 L2CAP_INFO_REQ, sizeof(info), &info);
2426 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2427 result == L2CAP_CR_SUCCESS) {
2428 u8 buf[128];
2429 set_bit(CONF_REQ_SENT, &chan->conf_state);
2430 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2431 l2cap_build_conf_req(chan, buf), buf);
2432 chan->num_conf_req++;
2435 return 0;
2438 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2440 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2441 u16 scid, dcid, result, status;
2442 struct l2cap_chan *chan;
2443 struct sock *sk;
2444 u8 req[128];
2446 scid = __le16_to_cpu(rsp->scid);
2447 dcid = __le16_to_cpu(rsp->dcid);
2448 result = __le16_to_cpu(rsp->result);
2449 status = __le16_to_cpu(rsp->status);
2451 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2453 if (scid) {
2454 chan = l2cap_get_chan_by_scid(conn, scid);
2455 if (!chan)
2456 return -EFAULT;
2457 } else {
2458 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2459 if (!chan)
2460 return -EFAULT;
2463 sk = chan->sk;
2465 switch (result) {
2466 case L2CAP_CR_SUCCESS:
2467 l2cap_state_change(chan, BT_CONFIG);
2468 chan->ident = 0;
2469 chan->dcid = dcid;
2470 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2472 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2473 break;
2475 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2476 l2cap_build_conf_req(chan, req), req);
2477 chan->num_conf_req++;
2478 break;
2480 case L2CAP_CR_PEND:
2481 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2482 break;
2484 default:
2485 /* don't delete l2cap channel if sk is owned by user */
2486 if (sock_owned_by_user(sk)) {
2487 l2cap_state_change(chan, BT_DISCONN);
2488 __clear_chan_timer(chan);
2489 __set_chan_timer(chan, HZ / 5);
2490 break;
2493 l2cap_chan_del(chan, ECONNREFUSED);
2494 break;
2497 bh_unlock_sock(sk);
2498 return 0;
2501 static inline void set_default_fcs(struct l2cap_chan *chan)
2503 /* FCS is enabled only in ERTM or streaming mode, if one or both
2504  * sides request it.
2505  */
2506 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2507 chan->fcs = L2CAP_FCS_NONE;
2508 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2509 chan->fcs = L2CAP_FCS_CRC16;
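/* Handle a Configure Request: accumulate option data across continuation
 * packets in chan->conf_req, then parse the complete request, send a
 * Configure Response and move the channel to BT_CONNECTED once both
 * directions have finished configuration.
 */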
2512 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2514 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2515 u16 dcid, flags;
2516 u8 rsp[64];
2517 struct l2cap_chan *chan;
2518 struct sock *sk;
2519 int len;
2521 dcid = __le16_to_cpu(req->dcid);
2522 flags = __le16_to_cpu(req->flags);
2524 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2526 chan = l2cap_get_chan_by_scid(conn, dcid);
2527 if (!chan)
2528 return -ENOENT;
2530 sk = chan->sk;
2532 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2533 struct l2cap_cmd_rej_cid rej;
2535 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2536 rej.scid = cpu_to_le16(chan->scid);
2537 rej.dcid = cpu_to_le16(chan->dcid);
2539 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2540 sizeof(rej), &rej);
2541 goto unlock;
2544 /* Reject if config buffer is too small. */
2545 len = cmd_len - sizeof(*req);
2546 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2547 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2548 l2cap_build_conf_rsp(chan, rsp,
2549 L2CAP_CONF_REJECT, flags), rsp);
2550 goto unlock;
2553 /* Store config. */
2554 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2555 chan->conf_len += len;
2557 if (flags & 0x0001) {
2558 /* Incomplete config. Send empty response. */
2559 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2560 l2cap_build_conf_rsp(chan, rsp,
2561 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2562 goto unlock;
2565 /* Complete config. */
2566 len = l2cap_parse_conf_req(chan, rsp);
2567 if (len < 0) {
2568 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2569 goto unlock;
2572 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2573 chan->num_conf_rsp++;
2575 /* Reset config buffer. */
2576 chan->conf_len = 0;
2578 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2579 goto unlock;
2581 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2582 set_default_fcs(chan);
2584 l2cap_state_change(chan, BT_CONNECTED);
2586 chan->next_tx_seq = 0;
2587 chan->expected_tx_seq = 0;
2588 skb_queue_head_init(&chan->tx_q);
2589 if (chan->mode == L2CAP_MODE_ERTM)
2590 l2cap_ertm_init(chan);
2592 l2cap_chan_ready(sk);
2593 goto unlock;
2596 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2597 u8 buf[64];
2598 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2599 l2cap_build_conf_req(chan, buf), buf);
2600 chan->num_conf_req++;
2603 unlock:
2604 bh_unlock_sock(sk);
2605 return 0;
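/* Handle a Configure Response: on success pick up the negotiated RFC
 * values; on L2CAP_CONF_UNACCEPT build and resend an adjusted request;
 * otherwise tear the channel down. The channel becomes BT_CONNECTED once
 * both the input and output paths are configured.
 */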
2608 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2610 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2611 u16 scid, flags, result;
2612 struct l2cap_chan *chan;
2613 struct sock *sk;
2614 int len = cmd->len - sizeof(*rsp);
2616 scid = __le16_to_cpu(rsp->scid);
2617 flags = __le16_to_cpu(rsp->flags);
2618 result = __le16_to_cpu(rsp->result);
2620 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2621 scid, flags, result);
2623 chan = l2cap_get_chan_by_scid(conn, scid);
2624 if (!chan)
2625 return 0;
2627 sk = chan->sk;
2629 switch (result) {
2630 case L2CAP_CONF_SUCCESS:
2631 l2cap_conf_rfc_get(chan, rsp->data, len);
2632 break;
2634 case L2CAP_CONF_UNACCEPT:
2635 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2636 char req[64];
2638 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2639 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2640 goto done;
2643 /* throw out any old stored conf requests */
2644 result = L2CAP_CONF_SUCCESS;
2645 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2646 req, &result);
2647 if (len < 0) {
2648 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2649 goto done;
2652 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2653 L2CAP_CONF_REQ, len, req);
2654 chan->num_conf_req++;
2655 if (result != L2CAP_CONF_SUCCESS)
2656 goto done;
2657 break;
2660 default:
2661 sk->sk_err = ECONNRESET;
2662 __set_chan_timer(chan, HZ * 5);
2663 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2664 goto done;
2667 if (flags & 0x01)
2668 goto done;
2670 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2672 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2673 set_default_fcs(chan);
2675 l2cap_state_change(chan, BT_CONNECTED);
2676 chan->next_tx_seq = 0;
2677 chan->expected_tx_seq = 0;
2678 skb_queue_head_init(&chan->tx_q);
2679 if (chan->mode == L2CAP_MODE_ERTM)
2680 l2cap_ertm_init(chan);
2682 l2cap_chan_ready(sk);
2685 done:
2686 bh_unlock_sock(sk);
2687 return 0;
2690 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2692 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2693 struct l2cap_disconn_rsp rsp;
2694 u16 dcid, scid;
2695 struct l2cap_chan *chan;
2696 struct sock *sk;
2698 scid = __le16_to_cpu(req->scid);
2699 dcid = __le16_to_cpu(req->dcid);
2701 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2703 chan = l2cap_get_chan_by_scid(conn, dcid);
2704 if (!chan)
2705 return 0;
2707 sk = chan->sk;
2709 rsp.dcid = cpu_to_le16(chan->scid);
2710 rsp.scid = cpu_to_le16(chan->dcid);
2711 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2713 sk->sk_shutdown = SHUTDOWN_MASK;
2715 /* don't delete l2cap channel if sk is owned by user */
2716 if (sock_owned_by_user(sk)) {
2717 l2cap_state_change(chan, BT_DISCONN);
2718 __clear_chan_timer(chan);
2719 __set_chan_timer(chan, HZ / 5);
2720 bh_unlock_sock(sk);
2721 return 0;
2724 l2cap_chan_del(chan, ECONNRESET);
2725 bh_unlock_sock(sk);
2727 chan->ops->close(chan->data);
2728 return 0;
2731 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2733 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2734 u16 dcid, scid;
2735 struct l2cap_chan *chan;
2736 struct sock *sk;
2738 scid = __le16_to_cpu(rsp->scid);
2739 dcid = __le16_to_cpu(rsp->dcid);
2741 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2743 chan = l2cap_get_chan_by_scid(conn, scid);
2744 if (!chan)
2745 return 0;
2747 sk = chan->sk;
2749 /* don't delete l2cap channel if sk is owned by user */
2750 if (sock_owned_by_user(sk)) {
2751 l2cap_state_change(chan, BT_DISCONN);
2752 __clear_chan_timer(chan);
2753 __set_chan_timer(chan, HZ / 5);
2754 bh_unlock_sock(sk);
2755 return 0;
2758 l2cap_chan_del(chan, 0);
2759 bh_unlock_sock(sk);
2761 chan->ops->close(chan->data);
2762 return 0;
2765 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2767 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2768 u16 type;
2770 type = __le16_to_cpu(req->type);
2772 BT_DBG("type 0x%4.4x", type);
2774 if (type == L2CAP_IT_FEAT_MASK) {
2775 u8 buf[8];
2776 u32 feat_mask = l2cap_feat_mask;
2777 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2778 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2779 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2780 if (!disable_ertm)
2781 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2782 | L2CAP_FEAT_FCS;
2783 put_unaligned_le32(feat_mask, rsp->data);
2784 l2cap_send_cmd(conn, cmd->ident,
2785 L2CAP_INFO_RSP, sizeof(buf), buf);
2786 } else if (type == L2CAP_IT_FIXED_CHAN) {
2787 u8 buf[12];
2788 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2789 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2790 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2791 memcpy(buf + 4, l2cap_fixed_chan, 8);
2792 l2cap_send_cmd(conn, cmd->ident,
2793 L2CAP_INFO_RSP, sizeof(buf), buf);
2794 } else {
2795 struct l2cap_info_rsp rsp;
2796 rsp.type = cpu_to_le16(type);
2797 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2798 l2cap_send_cmd(conn, cmd->ident,
2799 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2802 return 0;
2805 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2807 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2808 u16 type, result;
2810 type = __le16_to_cpu(rsp->type);
2811 result = __le16_to_cpu(rsp->result);
2813 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2815 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
2816 if (cmd->ident != conn->info_ident ||
2817 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2818 return 0;
2820 del_timer(&conn->info_timer);
2822 if (result != L2CAP_IR_SUCCESS) {
2823 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2824 conn->info_ident = 0;
2826 l2cap_conn_start(conn);
2828 return 0;
2831 if (type == L2CAP_IT_FEAT_MASK) {
2832 conn->feat_mask = get_unaligned_le32(rsp->data);
2834 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2835 struct l2cap_info_req req;
2836 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2838 conn->info_ident = l2cap_get_ident(conn);
2840 l2cap_send_cmd(conn, conn->info_ident,
2841 L2CAP_INFO_REQ, sizeof(req), &req);
2842 } else {
2843 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2844 conn->info_ident = 0;
2846 l2cap_conn_start(conn);
2848 } else if (type == L2CAP_IT_FIXED_CHAN) {
2849 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2850 conn->info_ident = 0;
2852 l2cap_conn_start(conn);
2855 return 0;
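/* Validate LE connection parameters: intervals are in 1.25 ms units
 * (6..3200, i.e. 7.5 ms to 4 s), the supervision timeout is in 10 ms
 * units (10..3200, i.e. 100 ms to 32 s), the timeout must exceed the
 * maximum interval, and the slave latency must leave enough headroom
 * below that timeout.
 */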
2858 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2859 u16 to_multiplier)
2861 u16 max_latency;
2863 if (min > max || min < 6 || max > 3200)
2864 return -EINVAL;
2866 if (to_multiplier < 10 || to_multiplier > 3200)
2867 return -EINVAL;
2869 if (max >= to_multiplier * 8)
2870 return -EINVAL;
2872 max_latency = (to_multiplier * 8 / max) - 1;
2873 if (latency > 499 || latency > max_latency)
2874 return -EINVAL;
2876 return 0;
2879 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2880 struct l2cap_cmd_hdr *cmd, u8 *data)
2882 struct hci_conn *hcon = conn->hcon;
2883 struct l2cap_conn_param_update_req *req;
2884 struct l2cap_conn_param_update_rsp rsp;
2885 u16 min, max, latency, to_multiplier, cmd_len;
2886 int err;
2888 if (!(hcon->link_mode & HCI_LM_MASTER))
2889 return -EINVAL;
2891 cmd_len = __le16_to_cpu(cmd->len);
2892 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2893 return -EPROTO;
2895 req = (struct l2cap_conn_param_update_req *) data;
2896 min = __le16_to_cpu(req->min);
2897 max = __le16_to_cpu(req->max);
2898 latency = __le16_to_cpu(req->latency);
2899 to_multiplier = __le16_to_cpu(req->to_multiplier);
2901 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2902 min, max, latency, to_multiplier);
2904 memset(&rsp, 0, sizeof(rsp));
2906 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2907 if (err)
2908 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2909 else
2910 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2912 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2913 sizeof(rsp), &rsp);
2915 if (!err)
2916 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2918 return 0;
2921 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2922 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2924 int err = 0;
2926 switch (cmd->code) {
2927 case L2CAP_COMMAND_REJ:
2928 l2cap_command_rej(conn, cmd, data);
2929 break;
2931 case L2CAP_CONN_REQ:
2932 err = l2cap_connect_req(conn, cmd, data);
2933 break;
2935 case L2CAP_CONN_RSP:
2936 err = l2cap_connect_rsp(conn, cmd, data);
2937 break;
2939 case L2CAP_CONF_REQ:
2940 err = l2cap_config_req(conn, cmd, cmd_len, data);
2941 break;
2943 case L2CAP_CONF_RSP:
2944 err = l2cap_config_rsp(conn, cmd, data);
2945 break;
2947 case L2CAP_DISCONN_REQ:
2948 err = l2cap_disconnect_req(conn, cmd, data);
2949 break;
2951 case L2CAP_DISCONN_RSP:
2952 err = l2cap_disconnect_rsp(conn, cmd, data);
2953 break;
2955 case L2CAP_ECHO_REQ:
2956 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2957 break;
2959 case L2CAP_ECHO_RSP:
2960 break;
2962 case L2CAP_INFO_REQ:
2963 err = l2cap_information_req(conn, cmd, data);
2964 break;
2966 case L2CAP_INFO_RSP:
2967 err = l2cap_information_rsp(conn, cmd, data);
2968 break;
2970 default:
2971 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2972 err = -EINVAL;
2973 break;
2976 return err;
2979 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2980 struct l2cap_cmd_hdr *cmd, u8 *data)
2982 switch (cmd->code) {
2983 case L2CAP_COMMAND_REJ:
2984 return 0;
2986 case L2CAP_CONN_PARAM_UPDATE_REQ:
2987 return l2cap_conn_param_update_req(conn, cmd, data);
2989 case L2CAP_CONN_PARAM_UPDATE_RSP:
2990 return 0;
2992 default:
2993 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2994 return -EINVAL;
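/* Walk the signalling channel payload command by command and dispatch
 * each one to the BR/EDR or LE handler. Truncated commands stop the
 * loop; commands a handler refuses are answered with a Command Reject.
 */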
2998 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2999 struct sk_buff *skb)
3001 u8 *data = skb->data;
3002 int len = skb->len;
3003 struct l2cap_cmd_hdr cmd;
3004 int err;
3006 l2cap_raw_recv(conn, skb);
3008 while (len >= L2CAP_CMD_HDR_SIZE) {
3009 u16 cmd_len;
3010 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3011 data += L2CAP_CMD_HDR_SIZE;
3012 len -= L2CAP_CMD_HDR_SIZE;
3014 cmd_len = le16_to_cpu(cmd.len);
3016 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3018 if (cmd_len > len || !cmd.ident) {
3019 BT_DBG("corrupted command");
3020 break;
3023 if (conn->hcon->type == LE_LINK)
3024 err = l2cap_le_sig_cmd(conn, &cmd, data);
3025 else
3026 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3028 if (err) {
3029 struct l2cap_cmd_rej_unk rej;
3031 BT_ERR("Wrong link type (%d)", err);
3033 /* FIXME: Map err to a valid reason */
3034 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3035 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3038 data += cmd_len;
3039 len -= cmd_len;
3042 kfree_skb(skb);
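/* When CRC16 FCS is in use, strip the trailing two FCS octets and check
 * them against a CRC computed over the L2CAP header, control field and
 * payload; returns -EBADMSG on mismatch.
 */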
3045 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3047 u16 our_fcs, rcv_fcs;
3048 int hdr_size = L2CAP_HDR_SIZE + 2;
3050 if (chan->fcs == L2CAP_FCS_CRC16) {
3051 skb_trim(skb, skb->len - 2);
3052 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3053 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3055 if (our_fcs != rcv_fcs)
3056 return -EBADMSG;
3058 return 0;
3061 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3063 u16 control = 0;
3065 chan->frames_sent = 0;
3067 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3069 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3070 control |= L2CAP_SUPER_RCV_NOT_READY;
3071 l2cap_send_sframe(chan, control);
3072 set_bit(CONN_RNR_SENT, &chan->conn_state);
3075 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3076 l2cap_retransmit_frames(chan);
3078 l2cap_ertm_send(chan);
3080 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3081 chan->frames_sent == 0) {
3082 control |= L2CAP_SUPER_RCV_READY;
3083 l2cap_send_sframe(chan, control);
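/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq (modulo 64, relative to buffer_seq) and
 * refusing duplicates.
 */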
3087 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3089 struct sk_buff *next_skb;
3090 int tx_seq_offset, next_tx_seq_offset;
3092 bt_cb(skb)->tx_seq = tx_seq;
3093 bt_cb(skb)->sar = sar;
3095 next_skb = skb_peek(&chan->srej_q);
3096 if (!next_skb) {
3097 __skb_queue_tail(&chan->srej_q, skb);
3098 return 0;
3101 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3102 if (tx_seq_offset < 0)
3103 tx_seq_offset += 64;
3105 do {
3106 if (bt_cb(next_skb)->tx_seq == tx_seq)
3107 return -EINVAL;
3109 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3110 chan->buffer_seq) % 64;
3111 if (next_tx_seq_offset < 0)
3112 next_tx_seq_offset += 64;
3114 if (next_tx_seq_offset > tx_seq_offset) {
3115 __skb_queue_before(&chan->srej_q, next_skb, skb);
3116 return 0;
3119 if (skb_queue_is_last(&chan->srej_q, next_skb))
3120 break;
3122 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3124 __skb_queue_tail(&chan->srej_q, skb);
3126 return 0;
3129 static void append_skb_frag(struct sk_buff *skb,
3130 struct sk_buff *new_frag, struct sk_buff **last_frag)
3132 /* skb->len reflects data in skb as well as all fragments
3133  * skb->data_len reflects only data in fragments
3134  */
3135 if (!skb_has_frag_list(skb))
3136 skb_shinfo(skb)->frag_list = new_frag;
3138 new_frag->next = NULL;
3140 (*last_frag)->next = new_frag;
3141 *last_frag = new_frag;
3143 skb->len += new_frag->len;
3144 skb->data_len += new_frag->len;
3145 skb->truesize += new_frag->truesize;
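/* Reassemble an SDU from its SAR fragments: unsegmented frames are
 * delivered directly, start/continue/end fragments are chained onto
 * chan->sdu until the advertised SDU length is reached, and oversized
 * or mismatched fragments are treated as errors.
 */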
3148 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3150 int err = -EINVAL;
3152 switch (control & L2CAP_CTRL_SAR) {
3153 case L2CAP_SDU_UNSEGMENTED:
3154 if (chan->sdu)
3155 break;
3157 err = chan->ops->recv(chan->data, skb);
3158 break;
3160 case L2CAP_SDU_START:
3161 if (chan->sdu)
3162 break;
3164 chan->sdu_len = get_unaligned_le16(skb->data);
3165 skb_pull(skb, 2);
3167 if (chan->sdu_len > chan->imtu) {
3168 err = -EMSGSIZE;
3169 break;
3172 if (skb->len >= chan->sdu_len)
3173 break;
3175 chan->sdu = skb;
3176 chan->sdu_last_frag = skb;
3178 skb = NULL;
3179 err = 0;
3180 break;
3182 case L2CAP_SDU_CONTINUE:
3183 if (!chan->sdu)
3184 break;
3186 append_skb_frag(chan->sdu, skb,
3187 &chan->sdu_last_frag);
3188 skb = NULL;
3190 if (chan->sdu->len >= chan->sdu_len)
3191 break;
3193 err = 0;
3194 break;
3196 case L2CAP_SDU_END:
3197 if (!chan->sdu)
3198 break;
3200 append_skb_frag(chan->sdu, skb,
3201 &chan->sdu_last_frag);
3202 skb = NULL;
3204 if (chan->sdu->len != chan->sdu_len)
3205 break;
3207 err = chan->ops->recv(chan->data, chan->sdu);
3209 if (!err) {
3210 /* Reassembly complete */
3211 chan->sdu = NULL;
3212 chan->sdu_last_frag = NULL;
3213 chan->sdu_len = 0;
3215 break;
3218 if (err) {
3219 kfree_skb(skb);
3220 kfree_skb(chan->sdu);
3221 chan->sdu = NULL;
3222 chan->sdu_last_frag = NULL;
3223 chan->sdu_len = 0;
3226 return err;
3229 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3231 u16 control;
3233 BT_DBG("chan %p, Enter local busy", chan);
3235 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3237 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3238 control |= L2CAP_SUPER_RCV_NOT_READY;
3239 l2cap_send_sframe(chan, control);
3241 set_bit(CONN_RNR_SENT, &chan->conn_state);
3243 __clear_ack_timer(chan);
3246 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3248 u16 control;
3250 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3251 goto done;
3253 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3254 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3255 l2cap_send_sframe(chan, control);
3256 chan->retry_count = 1;
3258 __clear_retrans_timer(chan);
3259 __set_monitor_timer(chan);
3261 set_bit(CONN_WAIT_F, &chan->conn_state);
3263 done:
3264 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3265 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3267 BT_DBG("chan %p, Exit local busy", chan);
3270 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3272 if (chan->mode == L2CAP_MODE_ERTM) {
3273 if (busy)
3274 l2cap_ertm_enter_local_busy(chan);
3275 else
3276 l2cap_ertm_exit_local_busy(chan);
3280 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3282 struct sk_buff *skb;
3283 u16 control;
3285 while ((skb = skb_peek(&chan->srej_q)) &&
3286 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3287 int err;
3289 if (bt_cb(skb)->tx_seq != tx_seq)
3290 break;
3292 skb = skb_dequeue(&chan->srej_q);
3293 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3294 err = l2cap_reassemble_sdu(chan, skb, control);
3296 if (err < 0) {
3297 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3298 break;
3301 chan->buffer_seq_srej =
3302 (chan->buffer_seq_srej + 1) % 64;
3303 tx_seq = (tx_seq + 1) % 64;
3307 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3309 struct srej_list *l, *tmp;
3310 u16 control;
3312 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3313 if (l->tx_seq == tx_seq) {
3314 list_del(&l->list);
3315 kfree(l);
3316 return;
3318 control = L2CAP_SUPER_SELECT_REJECT;
3319 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3320 l2cap_send_sframe(chan, control);
3321 list_del(&l->list);
3322 list_add_tail(&l->list, &chan->srej_l);
3326 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3328 struct srej_list *new;
3329 u16 control;
3331 while (tx_seq != chan->expected_tx_seq) {
3332 control = L2CAP_SUPER_SELECT_REJECT;
3333 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3334 l2cap_send_sframe(chan, control);
3336 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3337 new->tx_seq = chan->expected_tx_seq;
3338 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3339 list_add_tail(&new->list, &chan->srej_l);
3341 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3344 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3346 u8 tx_seq = __get_txseq(rx_control);
3347 u8 req_seq = __get_reqseq(rx_control);
3348 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3349 int tx_seq_offset, expected_tx_seq_offset;
3350 int num_to_ack = (chan->tx_win/6) + 1;
3351 int err = 0;
3353 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3354 tx_seq, rx_control);
3356 if (L2CAP_CTRL_FINAL & rx_control &&
3357 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3358 __clear_monitor_timer(chan);
3359 if (chan->unacked_frames > 0)
3360 __set_retrans_timer(chan);
3361 clear_bit(CONN_WAIT_F, &chan->conn_state);
3364 chan->expected_ack_seq = req_seq;
3365 l2cap_drop_acked_frames(chan);
3367 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3368 if (tx_seq_offset < 0)
3369 tx_seq_offset += 64;
3371 /* invalid tx_seq */
3372 if (tx_seq_offset >= chan->tx_win) {
3373 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3374 goto drop;
3377 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3378 goto drop;
3380 if (tx_seq == chan->expected_tx_seq)
3381 goto expected;
3383 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3384 struct srej_list *first;
3386 first = list_first_entry(&chan->srej_l,
3387 struct srej_list, list);
3388 if (tx_seq == first->tx_seq) {
3389 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3390 l2cap_check_srej_gap(chan, tx_seq);
3392 list_del(&first->list);
3393 kfree(first);
3395 if (list_empty(&chan->srej_l)) {
3396 chan->buffer_seq = chan->buffer_seq_srej;
3397 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3398 l2cap_send_ack(chan);
3399 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3401 } else {
3402 struct srej_list *l;
3404 /* duplicated tx_seq */
3405 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3406 goto drop;
3408 list_for_each_entry(l, &chan->srej_l, list) {
3409 if (l->tx_seq == tx_seq) {
3410 l2cap_resend_srejframe(chan, tx_seq);
3411 return 0;
3414 l2cap_send_srejframe(chan, tx_seq);
3416 } else {
3417 expected_tx_seq_offset =
3418 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3419 if (expected_tx_seq_offset < 0)
3420 expected_tx_seq_offset += 64;
3422 /* duplicated tx_seq */
3423 if (tx_seq_offset < expected_tx_seq_offset)
3424 goto drop;
3426 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3428 BT_DBG("chan %p, Enter SREJ", chan);
3430 INIT_LIST_HEAD(&chan->srej_l);
3431 chan->buffer_seq_srej = chan->buffer_seq;
3433 __skb_queue_head_init(&chan->srej_q);
3434 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3436 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3438 l2cap_send_srejframe(chan, tx_seq);
3440 __clear_ack_timer(chan);
3442 return 0;
3444 expected:
3445 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3447 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3448 bt_cb(skb)->tx_seq = tx_seq;
3449 bt_cb(skb)->sar = sar;
3450 __skb_queue_tail(&chan->srej_q, skb);
3451 return 0;
3454 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3455 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3456 if (err < 0) {
3457 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3458 return err;
3461 if (rx_control & L2CAP_CTRL_FINAL) {
3462 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3463 l2cap_retransmit_frames(chan);
3466 __set_ack_timer(chan);
3468 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3469 if (chan->num_acked == num_to_ack - 1)
3470 l2cap_send_ack(chan);
3472 return 0;
3474 drop:
3475 kfree_skb(skb);
3476 return 0;
3479 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3481 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3482 rx_control);
3484 chan->expected_ack_seq = __get_reqseq(rx_control);
3485 l2cap_drop_acked_frames(chan);
3487 if (rx_control & L2CAP_CTRL_POLL) {
3488 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3489 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3490 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3491 (chan->unacked_frames > 0))
3492 __set_retrans_timer(chan);
3494 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3495 l2cap_send_srejtail(chan);
3496 } else {
3497 l2cap_send_i_or_rr_or_rnr(chan);
3500 } else if (rx_control & L2CAP_CTRL_FINAL) {
3501 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3503 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3504 l2cap_retransmit_frames(chan);
3506 } else {
3507 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3508 (chan->unacked_frames > 0))
3509 __set_retrans_timer(chan);
3511 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3512 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3513 l2cap_send_ack(chan);
3514 else
3515 l2cap_ertm_send(chan);
3519 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3521 u8 tx_seq = __get_reqseq(rx_control);
3523 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3525 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3527 chan->expected_ack_seq = tx_seq;
3528 l2cap_drop_acked_frames(chan);
3530 if (rx_control & L2CAP_CTRL_FINAL) {
3531 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3532 l2cap_retransmit_frames(chan);
3533 } else {
3534 l2cap_retransmit_frames(chan);
3536 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3537 set_bit(CONN_REJ_ACT, &chan->conn_state);
3540 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3542 u8 tx_seq = __get_reqseq(rx_control);
3544 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3546 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3548 if (rx_control & L2CAP_CTRL_POLL) {
3549 chan->expected_ack_seq = tx_seq;
3550 l2cap_drop_acked_frames(chan);
3552 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3553 l2cap_retransmit_one_frame(chan, tx_seq);
3555 l2cap_ertm_send(chan);
3557 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3558 chan->srej_save_reqseq = tx_seq;
3559 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3561 } else if (rx_control & L2CAP_CTRL_FINAL) {
3562 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3563 chan->srej_save_reqseq == tx_seq)
3564 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3565 else
3566 l2cap_retransmit_one_frame(chan, tx_seq);
3567 } else {
3568 l2cap_retransmit_one_frame(chan, tx_seq);
3569 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3570 chan->srej_save_reqseq = tx_seq;
3571 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3576 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3578 u8 tx_seq = __get_reqseq(rx_control);
3580 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3582 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3583 chan->expected_ack_seq = tx_seq;
3584 l2cap_drop_acked_frames(chan);
3586 if (rx_control & L2CAP_CTRL_POLL)
3587 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3589 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3590 __clear_retrans_timer(chan);
3591 if (rx_control & L2CAP_CTRL_POLL)
3592 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3593 return;
3596 if (rx_control & L2CAP_CTRL_POLL)
3597 l2cap_send_srejtail(chan);
3598 else
3599 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3602 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3604 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3606 if (L2CAP_CTRL_FINAL & rx_control &&
3607 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3608 __clear_monitor_timer(chan);
3609 if (chan->unacked_frames > 0)
3610 __set_retrans_timer(chan);
3611 clear_bit(CONN_WAIT_F, &chan->conn_state);
3614 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3615 case L2CAP_SUPER_RCV_READY:
3616 l2cap_data_channel_rrframe(chan, rx_control);
3617 break;
3619 case L2CAP_SUPER_REJECT:
3620 l2cap_data_channel_rejframe(chan, rx_control);
3621 break;
3623 case L2CAP_SUPER_SELECT_REJECT:
3624 l2cap_data_channel_srejframe(chan, rx_control);
3625 break;
3627 case L2CAP_SUPER_RCV_NOT_READY:
3628 l2cap_data_channel_rnrframe(chan, rx_control);
3629 break;
3632 kfree_skb(skb);
3633 return 0;
3636 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3638 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3639 u16 control;
3640 u8 req_seq;
3641 int len, next_tx_seq_offset, req_seq_offset;
3643 control = get_unaligned_le16(skb->data);
3644 skb_pull(skb, 2);
3645 len = skb->len;
3647 /*
3648  * We can just drop the corrupted I-frame here.
3649  * Receiver will miss it and start proper recovery
3650  * procedures and ask for retransmission.
3651  */
3652 if (l2cap_check_fcs(chan, skb))
3653 goto drop;
3655 if (__is_sar_start(control) && __is_iframe(control))
3656 len -= 2;
3658 if (chan->fcs == L2CAP_FCS_CRC16)
3659 len -= 2;
3661 if (len > chan->mps) {
3662 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3663 goto drop;
3666 req_seq = __get_reqseq(control);
3667 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3668 if (req_seq_offset < 0)
3669 req_seq_offset += 64;
3671 next_tx_seq_offset =
3672 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3673 if (next_tx_seq_offset < 0)
3674 next_tx_seq_offset += 64;
3676 /* check for invalid req-seq */
3677 if (req_seq_offset > next_tx_seq_offset) {
3678 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3679 goto drop;
3682 if (__is_iframe(control)) {
3683 if (len < 0) {
3684 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3685 goto drop;
3688 l2cap_data_channel_iframe(chan, control, skb);
3689 } else {
3690 if (len != 0) {
3691 BT_ERR("%d", len); /* S-frame carried an unexpected payload */
3692 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3693 goto drop;
3696 l2cap_data_channel_sframe(chan, control, skb);
3699 return 0;
3701 drop:
3702 kfree_skb(skb);
3703 return 0;
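/* Deliver an incoming data frame to the channel identified by the CID.
 * Basic mode frames go straight to the socket, ERTM frames go through
 * the ERTM receive path (or the socket backlog), and streaming mode
 * frames are FCS-checked and reassembled, with gaps in the sequence
 * simply discarding the partial SDU.
 */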
3706 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3708 struct l2cap_chan *chan;
3709 struct sock *sk = NULL;
3710 u16 control;
3711 u8 tx_seq;
3712 int len;
3714 chan = l2cap_get_chan_by_scid(conn, cid);
3715 if (!chan) {
3716 BT_DBG("unknown cid 0x%4.4x", cid);
3717 goto drop;
3720 sk = chan->sk;
3722 BT_DBG("chan %p, len %d", chan, skb->len);
3724 if (chan->state != BT_CONNECTED)
3725 goto drop;
3727 switch (chan->mode) {
3728 case L2CAP_MODE_BASIC:
3729 /* If the socket recv buffer overflows we drop data here,
3730  * which is *bad* because L2CAP has to be reliable.
3731  * But we don't have any other choice: basic mode L2CAP
3732  * doesn't provide a flow control mechanism. */
3734 if (chan->imtu < skb->len)
3735 goto drop;
3737 if (!chan->ops->recv(chan->data, skb))
3738 goto done;
3739 break;
3741 case L2CAP_MODE_ERTM:
3742 if (!sock_owned_by_user(sk)) {
3743 l2cap_ertm_data_rcv(sk, skb);
3744 } else {
3745 if (sk_add_backlog(sk, skb))
3746 goto drop;
3749 goto done;
3751 case L2CAP_MODE_STREAMING:
3752 control = get_unaligned_le16(skb->data);
3753 skb_pull(skb, 2);
3754 len = skb->len;
3756 if (l2cap_check_fcs(chan, skb))
3757 goto drop;
3759 if (__is_sar_start(control))
3760 len -= 2;
3762 if (chan->fcs == L2CAP_FCS_CRC16)
3763 len -= 2;
3765 if (len > chan->mps || len < 0 || __is_sframe(control))
3766 goto drop;
3768 tx_seq = __get_txseq(control);
3770 if (chan->expected_tx_seq != tx_seq) {
3771 /* Frame(s) missing - must discard partial SDU */
3772 kfree_skb(chan->sdu);
3773 chan->sdu = NULL;
3774 chan->sdu_last_frag = NULL;
3775 chan->sdu_len = 0;
3777 /* TODO: Notify userland of missing data */
3780 chan->expected_tx_seq = (tx_seq + 1) % 64;
3782 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3783 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3785 goto done;
3787 default:
3788 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3789 break;
3792 drop:
3793 kfree_skb(skb);
3795 done:
3796 if (sk)
3797 bh_unlock_sock(sk);
3799 return 0;
3802 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3804 struct sock *sk = NULL;
3805 struct l2cap_chan *chan;
3807 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3808 if (!chan)
3809 goto drop;
3811 sk = chan->sk;
3813 bh_lock_sock(sk);
3815 BT_DBG("sk %p, len %d", sk, skb->len);
3817 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3818 goto drop;
3820 if (chan->imtu < skb->len)
3821 goto drop;
3823 if (!chan->ops->recv(chan->data, skb))
3824 goto done;
3826 drop:
3827 kfree_skb(skb);
3829 done:
3830 if (sk)
3831 bh_unlock_sock(sk);
3832 return 0;
3835 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3837 struct sock *sk = NULL;
3838 struct l2cap_chan *chan;
3840 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3841 if (!chan)
3842 goto drop;
3844 sk = chan->sk;
3846 bh_lock_sock(sk);
3848 BT_DBG("sk %p, len %d", sk, skb->len);
3850 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3851 goto drop;
3853 if (chan->imtu < skb->len)
3854 goto drop;
3856 if (!chan->ops->recv(chan->data, skb))
3857 goto done;
3859 drop:
3860 kfree_skb(skb);
3862 done:
3863 if (sk)
3864 bh_unlock_sock(sk);
3865 return 0;
3868 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3870 struct l2cap_hdr *lh = (void *) skb->data;
3871 u16 cid, len;
3872 __le16 psm;
3874 skb_pull(skb, L2CAP_HDR_SIZE);
3875 cid = __le16_to_cpu(lh->cid);
3876 len = __le16_to_cpu(lh->len);
3878 if (len != skb->len) {
3879 kfree_skb(skb);
3880 return;
3883 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3885 switch (cid) {
3886 case L2CAP_CID_LE_SIGNALING:
3887 case L2CAP_CID_SIGNALING:
3888 l2cap_sig_channel(conn, skb);
3889 break;
3891 case L2CAP_CID_CONN_LESS:
3892 psm = get_unaligned_le16(skb->data);
3893 skb_pull(skb, 2);
3894 l2cap_conless_channel(conn, psm, skb);
3895 break;
3897 case L2CAP_CID_LE_DATA:
3898 l2cap_att_channel(conn, cid, skb);
3899 break;
3901 case L2CAP_CID_SMP:
3902 if (smp_sig_channel(conn, skb))
3903 l2cap_conn_del(conn->hcon, EACCES);
3904 break;
3906 default:
3907 l2cap_data_channel(conn, cid, skb);
3908 break;
3912 /* ---- L2CAP interface with lower layer (HCI) ---- */
3914 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3916 int exact = 0, lm1 = 0, lm2 = 0;
3917 struct l2cap_chan *c;
3919 if (type != ACL_LINK)
3920 return -EINVAL;
3922 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3924 /* Find listening sockets and check their link_mode */
3925 read_lock(&chan_list_lock);
3926 list_for_each_entry(c, &chan_list, global_l) {
3927 struct sock *sk = c->sk;
3929 if (c->state != BT_LISTEN)
3930 continue;
3932 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3933 lm1 |= HCI_LM_ACCEPT;
3934 if (c->role_switch)
3935 lm1 |= HCI_LM_MASTER;
3936 exact++;
3937 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3938 lm2 |= HCI_LM_ACCEPT;
3939 if (c->role_switch)
3940 lm2 |= HCI_LM_MASTER;
3943 read_unlock(&chan_list_lock);
3945 return exact ? lm1 : lm2;
3948 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3950 struct l2cap_conn *conn;
3952 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3954 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3955 return -EINVAL;
3957 if (!status) {
3958 conn = l2cap_conn_add(hcon, status);
3959 if (conn)
3960 l2cap_conn_ready(conn);
3961 } else
3962 l2cap_conn_del(hcon, bt_to_errno(status));
3964 return 0;
3967 static int l2cap_disconn_ind(struct hci_conn *hcon)
3969 struct l2cap_conn *conn = hcon->l2cap_data;
3971 BT_DBG("hcon %p", hcon);
3973 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3974 return 0x13; /* HCI error: Remote User Terminated Connection */
3976 return conn->disc_reason;
3979 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3981 BT_DBG("hcon %p reason %d", hcon, reason);
3983 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3984 return -EINVAL;
3986 l2cap_conn_del(hcon, bt_to_errno(reason));
3988 return 0;
3991 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3993 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
3994 return;
3996 if (encrypt == 0x00) {
3997 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3998 __clear_chan_timer(chan);
3999 __set_chan_timer(chan, HZ * 5);
4000 } else if (chan->sec_level == BT_SECURITY_HIGH)
4001 l2cap_chan_close(chan, ECONNREFUSED);
4002 } else {
4003 if (chan->sec_level == BT_SECURITY_MEDIUM)
4004 __clear_chan_timer(chan);
4008 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4010 struct l2cap_conn *conn = hcon->l2cap_data;
4011 struct l2cap_chan *chan;
4013 if (!conn)
4014 return 0;
4016 BT_DBG("conn %p", conn);
4018 if (hcon->type == LE_LINK) {
4019 smp_distribute_keys(conn, 0);
4020 del_timer(&conn->security_timer);
4023 read_lock(&conn->chan_lock);
4025 list_for_each_entry(chan, &conn->chan_l, list) {
4026 struct sock *sk = chan->sk;
4028 bh_lock_sock(sk);
4030 BT_DBG("chan->scid %d", chan->scid);
4032 if (chan->scid == L2CAP_CID_LE_DATA) {
4033 if (!status && encrypt) {
4034 chan->sec_level = hcon->sec_level;
4035 l2cap_chan_ready(sk);
4038 bh_unlock_sock(sk);
4039 continue;
4042 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4043 bh_unlock_sock(sk);
4044 continue;
4047 if (!status && (chan->state == BT_CONNECTED ||
4048 chan->state == BT_CONFIG)) {
4049 l2cap_check_encryption(chan, encrypt);
4050 bh_unlock_sock(sk);
4051 continue;
4054 if (chan->state == BT_CONNECT) {
4055 if (!status) {
4056 struct l2cap_conn_req req;
4057 req.scid = cpu_to_le16(chan->scid);
4058 req.psm = chan->psm;
4060 chan->ident = l2cap_get_ident(conn);
4061 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4063 l2cap_send_cmd(conn, chan->ident,
4064 L2CAP_CONN_REQ, sizeof(req), &req);
4065 } else {
4066 __clear_chan_timer(chan);
4067 __set_chan_timer(chan, HZ / 10);
4069 } else if (chan->state == BT_CONNECT2) {
4070 struct l2cap_conn_rsp rsp;
4071 __u16 res, stat;
4073 if (!status) {
4074 if (bt_sk(sk)->defer_setup) {
4075 struct sock *parent = bt_sk(sk)->parent;
4076 res = L2CAP_CR_PEND;
4077 stat = L2CAP_CS_AUTHOR_PEND;
4078 if (parent)
4079 parent->sk_data_ready(parent, 0);
4080 } else {
4081 l2cap_state_change(chan, BT_CONFIG);
4082 res = L2CAP_CR_SUCCESS;
4083 stat = L2CAP_CS_NO_INFO;
4085 } else {
4086 l2cap_state_change(chan, BT_DISCONN);
4087 __set_chan_timer(chan, HZ / 10);
4088 res = L2CAP_CR_SEC_BLOCK;
4089 stat = L2CAP_CS_NO_INFO;
4092 rsp.scid = cpu_to_le16(chan->dcid);
4093 rsp.dcid = cpu_to_le16(chan->scid);
4094 rsp.result = cpu_to_le16(res);
4095 rsp.status = cpu_to_le16(stat);
4096 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4097 sizeof(rsp), &rsp);
4100 bh_unlock_sock(sk);
4103 read_unlock(&conn->chan_lock);
4105 return 0;
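/* Reassemble L2CAP frames from ACL data fragments: a start fragment
 * carries the Basic L2CAP header and gives the total frame length,
 * continuation fragments are appended to conn->rx_skb until the frame
 * is complete and can be handed to l2cap_recv_frame().
 */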
4108 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4110 struct l2cap_conn *conn = hcon->l2cap_data;
4112 if (!conn)
4113 conn = l2cap_conn_add(hcon, 0);
4115 if (!conn)
4116 goto drop;
4118 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4120 if (!(flags & ACL_CONT)) {
4121 struct l2cap_hdr *hdr;
4122 struct l2cap_chan *chan;
4123 u16 cid;
4124 int len;
4126 if (conn->rx_len) {
4127 BT_ERR("Unexpected start frame (len %d)", skb->len);
4128 kfree_skb(conn->rx_skb);
4129 conn->rx_skb = NULL;
4130 conn->rx_len = 0;
4131 l2cap_conn_unreliable(conn, ECOMM);
4134 /* A start fragment always begins with the Basic L2CAP header */
4135 if (skb->len < L2CAP_HDR_SIZE) {
4136 BT_ERR("Frame is too short (len %d)", skb->len);
4137 l2cap_conn_unreliable(conn, ECOMM);
4138 goto drop;
4141 hdr = (struct l2cap_hdr *) skb->data;
4142 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4143 cid = __le16_to_cpu(hdr->cid);
4145 if (len == skb->len) {
4146 /* Complete frame received */
4147 l2cap_recv_frame(conn, skb);
4148 return 0;
4151 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4153 if (skb->len > len) {
4154 BT_ERR("Frame is too long (len %d, expected len %d)",
4155 skb->len, len);
4156 l2cap_conn_unreliable(conn, ECOMM);
4157 goto drop;
4160 chan = l2cap_get_chan_by_scid(conn, cid);
4162 if (chan && chan->sk) {
4163 struct sock *sk = chan->sk;
4165 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4166 BT_ERR("Frame exceeding recv MTU (len %d, "
4167 "MTU %d)", len,
4168 chan->imtu);
4169 bh_unlock_sock(sk);
4170 l2cap_conn_unreliable(conn, ECOMM);
4171 goto drop;
4173 bh_unlock_sock(sk);
4176 /* Allocate skb for the complete frame (with header) */
4177 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4178 if (!conn->rx_skb)
4179 goto drop;
4181 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4182 skb->len);
4183 conn->rx_len = len - skb->len;
4184 } else {
4185 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4187 if (!conn->rx_len) {
4188 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4189 l2cap_conn_unreliable(conn, ECOMM);
4190 goto drop;
4193 if (skb->len > conn->rx_len) {
4194 BT_ERR("Fragment is too long (len %d, expected %d)",
4195 skb->len, conn->rx_len);
4196 kfree_skb(conn->rx_skb);
4197 conn->rx_skb = NULL;
4198 conn->rx_len = 0;
4199 l2cap_conn_unreliable(conn, ECOMM);
4200 goto drop;
4203 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4204 skb->len);
4205 conn->rx_len -= skb->len;
4207 if (!conn->rx_len) {
4208 /* Complete frame received */
4209 l2cap_recv_frame(conn, conn->rx_skb);
4210 conn->rx_skb = NULL;
4214 drop:
4215 kfree_skb(skb);
4216 return 0;
4219 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4221 struct l2cap_chan *c;
4223 read_lock_bh(&chan_list_lock);
4225 list_for_each_entry(c, &chan_list, global_l) {
4226 struct sock *sk = c->sk;
4228 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4229 batostr(&bt_sk(sk)->src),
4230 batostr(&bt_sk(sk)->dst),
4231 c->state, __le16_to_cpu(c->psm),
4232 c->scid, c->dcid, c->imtu, c->omtu,
4233 c->sec_level, c->mode);
4236 read_unlock_bh(&chan_list_lock);
4238 return 0;
4241 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4243 return single_open(file, l2cap_debugfs_show, inode->i_private);
4246 static const struct file_operations l2cap_debugfs_fops = {
4247 .open = l2cap_debugfs_open,
4248 .read = seq_read,
4249 .llseek = seq_lseek,
4250 .release = single_release,
4253 static struct dentry *l2cap_debugfs;
4255 static struct hci_proto l2cap_hci_proto = {
4256 .name = "L2CAP",
4257 .id = HCI_PROTO_L2CAP,
4258 .connect_ind = l2cap_connect_ind,
4259 .connect_cfm = l2cap_connect_cfm,
4260 .disconn_ind = l2cap_disconn_ind,
4261 .disconn_cfm = l2cap_disconn_cfm,
4262 .security_cfm = l2cap_security_cfm,
4263 .recv_acldata = l2cap_recv_acldata
4266 int __init l2cap_init(void)
4268 int err;
4270 err = l2cap_init_sockets();
4271 if (err < 0)
4272 return err;
4274 err = hci_register_proto(&l2cap_hci_proto);
4275 if (err < 0) {
4276 BT_ERR("L2CAP protocol registration failed");
4277 bt_sock_unregister(BTPROTO_L2CAP);
4278 goto error;
4281 if (bt_debugfs) {
4282 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4283 bt_debugfs, NULL, &l2cap_debugfs_fops);
4284 if (!l2cap_debugfs)
4285 BT_ERR("Failed to create L2CAP debug file");
4288 return 0;
4290 error:
4291 l2cap_cleanup_sockets();
4292 return err;
4295 void l2cap_exit(void)
4297 debugfs_remove(l2cap_debugfs);
4299 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4300 BT_ERR("L2CAP protocol unregistration failed");
4302 l2cap_cleanup_sockets();
4305 module_param(disable_ertm, bool, 0644);
4306 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");