Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph...
[nv-tegra-linux-2.6.git] / net / bluetooth / l2cap.c
blob7794a2e2adcea93128fc5fcf4ccf8a8e306af434
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */

/* Socket timer expired: close the channel with an error code chosen from
 * the state the connection got stuck in, then reap the socket. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		reason = ECONNREFUSED;
		break;
	case BT_CONNECT:
		/* A stalled SDP-level connect still counts as a timeout */
		reason = (l2cap_pi(sk)->sec_level != BT_SECURITY_SDP) ?
						ECONNREFUSED : ETIMEDOUT;
		break;
	default:
		reason = ETIMEDOUT;
		break;
	}

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Must run on the unlocked socket */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
109 static void l2cap_sock_clear_timer(struct sock *sk)
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
115 /* ---- L2CAP channels ---- */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
118 struct sock *s;
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
121 break;
123 return s;
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
128 struct sock *s;
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
131 break;
133 return s;
136 /* Find channel with given SCID.
137 * Returns locked socket */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
140 struct sock *s;
141 read_lock(&l->lock);
142 s = __l2cap_get_chan_by_scid(l, cid);
143 if (s)
144 bh_lock_sock(s);
145 read_unlock(&l->lock);
146 return s;
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
151 struct sock *s;
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
154 break;
156 return s;
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
161 struct sock *s;
162 read_lock(&l->lock);
163 s = __l2cap_get_chan_by_ident(l, ident);
164 if (s)
165 bh_lock_sock(s);
166 read_unlock(&l->lock);
167 return s;
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
176 return cid;
179 return 0;
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
184 sock_hold(sk);
186 if (l->head)
187 l2cap_pi(l->head)->prev_c = sk;
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
191 l->head = sk;
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
198 write_lock_bh(&l->lock);
199 if (sk == l->head)
200 l->head = next;
202 if (next)
203 l2cap_pi(next)->prev_c = prev;
204 if (prev)
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
208 __sock_put(sk);
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
213 struct l2cap_chan_list *l = &conn->chan_list;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
218 conn->disc_reason = 0x13;
220 l2cap_pi(sk)->conn = conn;
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
230 } else {
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 __l2cap_chan_link(l, sk);
239 if (parent)
240 bt_accept_enqueue(parent, sk);
243 /* Delete channel.
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
250 l2cap_sock_clear_timer(sk);
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
254 if (conn) {
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
258 hci_conn_put(conn->hcon);
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
264 if (err)
265 sk->sk_err = err;
267 if (parent) {
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
270 } else
271 sk->sk_state_change(sk);
274 /* Service level security */
275 static inline int l2cap_check_security(struct sock *sk)
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
278 __u8 auth_type;
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
283 else
284 auth_type = HCI_AT_NO_BONDING;
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
288 } else {
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
292 break;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
295 break;
296 default:
297 auth_type = HCI_AT_NO_BONDING;
298 break;
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
303 auth_type);
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
308 u8 id;
310 /* Get next available identificator.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn->lock);
318 if (++conn->tx_ident > 128)
319 conn->tx_ident = 1;
321 id = conn->tx_ident;
323 spin_unlock_bh(&conn->lock);
325 return id;
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
332 BT_DBG("code 0x%2.2x", code);
334 if (!skb)
335 return -ENOMEM;
337 return hci_send_acl(conn->hcon, skb, 0);
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
342 struct sk_buff *skb;
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
347 if (pi->fcs == L2CAP_FCS_CRC16)
348 hlen += 2;
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
355 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 if (!skb)
357 return -ENOMEM;
359 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
360 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
361 lh->cid = cpu_to_le16(pi->dcid);
362 put_unaligned_le16(control, skb_put(skb, 2));
364 if (pi->fcs == L2CAP_FCS_CRC16) {
365 u16 fcs = crc16(0, (u8 *)lh, count - 2);
366 put_unaligned_le16(fcs, skb_put(skb, 2));
369 return hci_send_acl(pi->conn->hcon, skb, 0);
372 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
374 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
375 control |= L2CAP_SUPER_RCV_NOT_READY;
376 else
377 control |= L2CAP_SUPER_RCV_READY;
379 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
381 return l2cap_send_sframe(pi, control);
384 static void l2cap_do_start(struct sock *sk)
386 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
388 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
389 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
390 return;
392 if (l2cap_check_security(sk)) {
393 struct l2cap_conn_req req;
394 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
395 req.psm = l2cap_pi(sk)->psm;
397 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
399 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
400 L2CAP_CONN_REQ, sizeof(req), &req);
402 } else {
403 struct l2cap_info_req req;
404 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
407 conn->info_ident = l2cap_get_ident(conn);
409 mod_timer(&conn->info_timer, jiffies +
410 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
412 l2cap_send_cmd(conn, conn->info_ident,
413 L2CAP_INFO_REQ, sizeof(req), &req);
417 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
419 struct l2cap_disconn_req req;
421 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
422 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
423 l2cap_send_cmd(conn, l2cap_get_ident(conn),
424 L2CAP_DISCONN_REQ, sizeof(req), &req);
427 /* ---- L2CAP connections ---- */
428 static void l2cap_conn_start(struct l2cap_conn *conn)
430 struct l2cap_chan_list *l = &conn->chan_list;
431 struct sock *sk;
433 BT_DBG("conn %p", conn);
435 read_lock(&l->lock);
437 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
438 bh_lock_sock(sk);
440 if (sk->sk_type != SOCK_SEQPACKET) {
441 bh_unlock_sock(sk);
442 continue;
445 if (sk->sk_state == BT_CONNECT) {
446 if (l2cap_check_security(sk)) {
447 struct l2cap_conn_req req;
448 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
449 req.psm = l2cap_pi(sk)->psm;
451 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
453 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
454 L2CAP_CONN_REQ, sizeof(req), &req);
456 } else if (sk->sk_state == BT_CONNECT2) {
457 struct l2cap_conn_rsp rsp;
458 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
459 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
461 if (l2cap_check_security(sk)) {
462 if (bt_sk(sk)->defer_setup) {
463 struct sock *parent = bt_sk(sk)->parent;
464 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
465 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
466 parent->sk_data_ready(parent, 0);
468 } else {
469 sk->sk_state = BT_CONFIG;
470 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
471 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
473 } else {
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
478 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
479 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
482 bh_unlock_sock(sk);
485 read_unlock(&l->lock);
488 static void l2cap_conn_ready(struct l2cap_conn *conn)
490 struct l2cap_chan_list *l = &conn->chan_list;
491 struct sock *sk;
493 BT_DBG("conn %p", conn);
495 read_lock(&l->lock);
497 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
498 bh_lock_sock(sk);
500 if (sk->sk_type != SOCK_SEQPACKET) {
501 l2cap_sock_clear_timer(sk);
502 sk->sk_state = BT_CONNECTED;
503 sk->sk_state_change(sk);
504 } else if (sk->sk_state == BT_CONNECT)
505 l2cap_do_start(sk);
507 bh_unlock_sock(sk);
510 read_unlock(&l->lock);
513 /* Notify sockets that we cannot guaranty reliability anymore */
514 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
516 struct l2cap_chan_list *l = &conn->chan_list;
517 struct sock *sk;
519 BT_DBG("conn %p", conn);
521 read_lock(&l->lock);
523 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
524 if (l2cap_pi(sk)->force_reliable)
525 sk->sk_err = err;
528 read_unlock(&l->lock);
531 static void l2cap_info_timeout(unsigned long arg)
533 struct l2cap_conn *conn = (void *) arg;
535 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
536 conn->info_ident = 0;
538 l2cap_conn_start(conn);
541 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
543 struct l2cap_conn *conn = hcon->l2cap_data;
545 if (conn || status)
546 return conn;
548 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
549 if (!conn)
550 return NULL;
552 hcon->l2cap_data = conn;
553 conn->hcon = hcon;
555 BT_DBG("hcon %p conn %p", hcon, conn);
557 conn->mtu = hcon->hdev->acl_mtu;
558 conn->src = &hcon->hdev->bdaddr;
559 conn->dst = &hcon->dst;
561 conn->feat_mask = 0;
563 spin_lock_init(&conn->lock);
564 rwlock_init(&conn->chan_list.lock);
566 setup_timer(&conn->info_timer, l2cap_info_timeout,
567 (unsigned long) conn);
569 conn->disc_reason = 0x13;
571 return conn;
574 static void l2cap_conn_del(struct hci_conn *hcon, int err)
576 struct l2cap_conn *conn = hcon->l2cap_data;
577 struct sock *sk;
579 if (!conn)
580 return;
582 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
584 kfree_skb(conn->rx_skb);
586 /* Kill channels */
587 while ((sk = conn->chan_list.head)) {
588 bh_lock_sock(sk);
589 l2cap_chan_del(sk, err);
590 bh_unlock_sock(sk);
591 l2cap_sock_kill(sk);
594 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
595 del_timer_sync(&conn->info_timer);
597 hcon->l2cap_data = NULL;
598 kfree(conn);
601 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
603 struct l2cap_chan_list *l = &conn->chan_list;
604 write_lock_bh(&l->lock);
605 __l2cap_chan_add(conn, sk, parent);
606 write_unlock_bh(&l->lock);
609 /* ---- Socket interface ---- */
610 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
612 struct sock *sk;
613 struct hlist_node *node;
614 sk_for_each(sk, node, &l2cap_sk_list.head)
615 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
616 goto found;
617 sk = NULL;
618 found:
619 return sk;
622 /* Find socket with psm and source bdaddr.
623 * Returns closest match.
625 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
627 struct sock *sk = NULL, *sk1 = NULL;
628 struct hlist_node *node;
630 sk_for_each(sk, node, &l2cap_sk_list.head) {
631 if (state && sk->sk_state != state)
632 continue;
634 if (l2cap_pi(sk)->psm == psm) {
635 /* Exact match. */
636 if (!bacmp(&bt_sk(sk)->src, src))
637 break;
639 /* Closest match */
640 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
641 sk1 = sk;
644 return node ? sk : sk1;
647 /* Find socket with given address (psm, src).
648 * Returns locked socket */
649 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
651 struct sock *s;
652 read_lock(&l2cap_sk_list.lock);
653 s = __l2cap_get_sock_by_psm(state, psm, src);
654 if (s)
655 bh_lock_sock(s);
656 read_unlock(&l2cap_sk_list.lock);
657 return s;
660 static void l2cap_sock_destruct(struct sock *sk)
662 BT_DBG("sk %p", sk);
664 skb_queue_purge(&sk->sk_receive_queue);
665 skb_queue_purge(&sk->sk_write_queue);
668 static void l2cap_sock_cleanup_listen(struct sock *parent)
670 struct sock *sk;
672 BT_DBG("parent %p", parent);
674 /* Close not yet accepted channels */
675 while ((sk = bt_accept_dequeue(parent, NULL)))
676 l2cap_sock_close(sk);
678 parent->sk_state = BT_CLOSED;
679 sock_set_flag(parent, SOCK_ZAPPED);
682 /* Kill socket (only if zapped and orphan)
683 * Must be called on unlocked socket.
685 static void l2cap_sock_kill(struct sock *sk)
687 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
688 return;
690 BT_DBG("sk %p state %d", sk, sk->sk_state);
692 /* Kill poor orphan */
693 bt_sock_unlink(&l2cap_sk_list, sk);
694 sock_set_flag(sk, SOCK_DEAD);
695 sock_put(sk);
698 static void __l2cap_sock_close(struct sock *sk, int reason)
700 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
702 switch (sk->sk_state) {
703 case BT_LISTEN:
704 l2cap_sock_cleanup_listen(sk);
705 break;
707 case BT_CONNECTED:
708 case BT_CONFIG:
709 if (sk->sk_type == SOCK_SEQPACKET) {
710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
712 sk->sk_state = BT_DISCONN;
713 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
714 l2cap_send_disconn_req(conn, sk);
715 } else
716 l2cap_chan_del(sk, reason);
717 break;
719 case BT_CONNECT2:
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
722 struct l2cap_conn_rsp rsp;
723 __u16 result;
725 if (bt_sk(sk)->defer_setup)
726 result = L2CAP_CR_SEC_BLOCK;
727 else
728 result = L2CAP_CR_BAD_PSM;
730 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
731 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
732 rsp.result = cpu_to_le16(result);
733 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
734 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
735 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
736 } else
737 l2cap_chan_del(sk, reason);
738 break;
740 case BT_CONNECT:
741 case BT_DISCONN:
742 l2cap_chan_del(sk, reason);
743 break;
745 default:
746 sock_set_flag(sk, SOCK_ZAPPED);
747 break;
751 /* Must be called on unlocked socket. */
752 static void l2cap_sock_close(struct sock *sk)
754 l2cap_sock_clear_timer(sk);
755 lock_sock(sk);
756 __l2cap_sock_close(sk, ECONNRESET);
757 release_sock(sk);
758 l2cap_sock_kill(sk);
761 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
763 struct l2cap_pinfo *pi = l2cap_pi(sk);
765 BT_DBG("sk %p", sk);
767 if (parent) {
768 sk->sk_type = parent->sk_type;
769 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
771 pi->imtu = l2cap_pi(parent)->imtu;
772 pi->omtu = l2cap_pi(parent)->omtu;
773 pi->mode = l2cap_pi(parent)->mode;
774 pi->fcs = l2cap_pi(parent)->fcs;
775 pi->sec_level = l2cap_pi(parent)->sec_level;
776 pi->role_switch = l2cap_pi(parent)->role_switch;
777 pi->force_reliable = l2cap_pi(parent)->force_reliable;
778 } else {
779 pi->imtu = L2CAP_DEFAULT_MTU;
780 pi->omtu = 0;
781 pi->mode = L2CAP_MODE_BASIC;
782 pi->fcs = L2CAP_FCS_CRC16;
783 pi->sec_level = BT_SECURITY_LOW;
784 pi->role_switch = 0;
785 pi->force_reliable = 0;
788 /* Default config options */
789 pi->conf_len = 0;
790 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
791 skb_queue_head_init(TX_QUEUE(sk));
792 skb_queue_head_init(SREJ_QUEUE(sk));
793 INIT_LIST_HEAD(SREJ_LIST(sk));
796 static struct proto l2cap_proto = {
797 .name = "L2CAP",
798 .owner = THIS_MODULE,
799 .obj_size = sizeof(struct l2cap_pinfo)
802 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
804 struct sock *sk;
806 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
807 if (!sk)
808 return NULL;
810 sock_init_data(sock, sk);
811 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
813 sk->sk_destruct = l2cap_sock_destruct;
814 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
816 sock_reset_flag(sk, SOCK_ZAPPED);
818 sk->sk_protocol = proto;
819 sk->sk_state = BT_OPEN;
821 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
823 bt_sock_link(&l2cap_sk_list, sk);
824 return sk;
827 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
828 int kern)
830 struct sock *sk;
832 BT_DBG("sock %p", sock);
834 sock->state = SS_UNCONNECTED;
836 if (sock->type != SOCK_SEQPACKET &&
837 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
838 return -ESOCKTNOSUPPORT;
840 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
841 return -EPERM;
843 sock->ops = &l2cap_sock_ops;
845 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
846 if (!sk)
847 return -ENOMEM;
849 l2cap_sock_init(sk, NULL);
850 return 0;
853 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
855 struct sock *sk = sock->sk;
856 struct sockaddr_l2 la;
857 int len, err = 0;
859 BT_DBG("sk %p", sk);
861 if (!addr || addr->sa_family != AF_BLUETOOTH)
862 return -EINVAL;
864 memset(&la, 0, sizeof(la));
865 len = min_t(unsigned int, sizeof(la), alen);
866 memcpy(&la, addr, len);
868 if (la.l2_cid)
869 return -EINVAL;
871 lock_sock(sk);
873 if (sk->sk_state != BT_OPEN) {
874 err = -EBADFD;
875 goto done;
878 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
879 !capable(CAP_NET_BIND_SERVICE)) {
880 err = -EACCES;
881 goto done;
884 write_lock_bh(&l2cap_sk_list.lock);
886 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
887 err = -EADDRINUSE;
888 } else {
889 /* Save source address */
890 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
891 l2cap_pi(sk)->psm = la.l2_psm;
892 l2cap_pi(sk)->sport = la.l2_psm;
893 sk->sk_state = BT_BOUND;
895 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
896 __le16_to_cpu(la.l2_psm) == 0x0003)
897 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
900 write_unlock_bh(&l2cap_sk_list.lock);
902 done:
903 release_sock(sk);
904 return err;
907 static int l2cap_do_connect(struct sock *sk)
909 bdaddr_t *src = &bt_sk(sk)->src;
910 bdaddr_t *dst = &bt_sk(sk)->dst;
911 struct l2cap_conn *conn;
912 struct hci_conn *hcon;
913 struct hci_dev *hdev;
914 __u8 auth_type;
915 int err;
917 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
918 l2cap_pi(sk)->psm);
920 hdev = hci_get_route(dst, src);
921 if (!hdev)
922 return -EHOSTUNREACH;
924 hci_dev_lock_bh(hdev);
926 err = -ENOMEM;
928 if (sk->sk_type == SOCK_RAW) {
929 switch (l2cap_pi(sk)->sec_level) {
930 case BT_SECURITY_HIGH:
931 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
932 break;
933 case BT_SECURITY_MEDIUM:
934 auth_type = HCI_AT_DEDICATED_BONDING;
935 break;
936 default:
937 auth_type = HCI_AT_NO_BONDING;
938 break;
940 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
941 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
942 auth_type = HCI_AT_NO_BONDING_MITM;
943 else
944 auth_type = HCI_AT_NO_BONDING;
946 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
947 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
948 } else {
949 switch (l2cap_pi(sk)->sec_level) {
950 case BT_SECURITY_HIGH:
951 auth_type = HCI_AT_GENERAL_BONDING_MITM;
952 break;
953 case BT_SECURITY_MEDIUM:
954 auth_type = HCI_AT_GENERAL_BONDING;
955 break;
956 default:
957 auth_type = HCI_AT_NO_BONDING;
958 break;
962 hcon = hci_connect(hdev, ACL_LINK, dst,
963 l2cap_pi(sk)->sec_level, auth_type);
964 if (!hcon)
965 goto done;
967 conn = l2cap_conn_add(hcon, 0);
968 if (!conn) {
969 hci_conn_put(hcon);
970 goto done;
973 err = 0;
975 /* Update source addr of the socket */
976 bacpy(src, conn->src);
978 l2cap_chan_add(conn, sk, NULL);
980 sk->sk_state = BT_CONNECT;
981 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
983 if (hcon->state == BT_CONNECTED) {
984 if (sk->sk_type != SOCK_SEQPACKET) {
985 l2cap_sock_clear_timer(sk);
986 sk->sk_state = BT_CONNECTED;
987 } else
988 l2cap_do_start(sk);
991 done:
992 hci_dev_unlock_bh(hdev);
993 hci_dev_put(hdev);
994 return err;
997 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
999 struct sock *sk = sock->sk;
1000 struct sockaddr_l2 la;
1001 int len, err = 0;
1003 BT_DBG("sk %p", sk);
1005 if (!addr || addr->sa_family != AF_BLUETOOTH)
1006 return -EINVAL;
1008 memset(&la, 0, sizeof(la));
1009 len = min_t(unsigned int, sizeof(la), alen);
1010 memcpy(&la, addr, len);
1012 if (la.l2_cid)
1013 return -EINVAL;
1015 lock_sock(sk);
1017 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1018 err = -EINVAL;
1019 goto done;
1022 switch (l2cap_pi(sk)->mode) {
1023 case L2CAP_MODE_BASIC:
1024 break;
1025 case L2CAP_MODE_ERTM:
1026 case L2CAP_MODE_STREAMING:
1027 if (enable_ertm)
1028 break;
1029 /* fall through */
1030 default:
1031 err = -ENOTSUPP;
1032 goto done;
1035 switch (sk->sk_state) {
1036 case BT_CONNECT:
1037 case BT_CONNECT2:
1038 case BT_CONFIG:
1039 /* Already connecting */
1040 goto wait;
1042 case BT_CONNECTED:
1043 /* Already connected */
1044 goto done;
1046 case BT_OPEN:
1047 case BT_BOUND:
1048 /* Can connect */
1049 break;
1051 default:
1052 err = -EBADFD;
1053 goto done;
1056 /* Set destination address and psm */
1057 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1058 l2cap_pi(sk)->psm = la.l2_psm;
1060 err = l2cap_do_connect(sk);
1061 if (err)
1062 goto done;
1064 wait:
1065 err = bt_sock_wait_state(sk, BT_CONNECTED,
1066 sock_sndtimeo(sk, flags & O_NONBLOCK));
1067 done:
1068 release_sock(sk);
1069 return err;
1072 static int l2cap_sock_listen(struct socket *sock, int backlog)
1074 struct sock *sk = sock->sk;
1075 int err = 0;
1077 BT_DBG("sk %p backlog %d", sk, backlog);
1079 lock_sock(sk);
1081 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1082 err = -EBADFD;
1083 goto done;
1086 switch (l2cap_pi(sk)->mode) {
1087 case L2CAP_MODE_BASIC:
1088 break;
1089 case L2CAP_MODE_ERTM:
1090 case L2CAP_MODE_STREAMING:
1091 if (enable_ertm)
1092 break;
1093 /* fall through */
1094 default:
1095 err = -ENOTSUPP;
1096 goto done;
1099 if (!l2cap_pi(sk)->psm) {
1100 bdaddr_t *src = &bt_sk(sk)->src;
1101 u16 psm;
1103 err = -EINVAL;
1105 write_lock_bh(&l2cap_sk_list.lock);
1107 for (psm = 0x1001; psm < 0x1100; psm += 2)
1108 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1109 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1110 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1111 err = 0;
1112 break;
1115 write_unlock_bh(&l2cap_sk_list.lock);
1117 if (err < 0)
1118 goto done;
1121 sk->sk_max_ack_backlog = backlog;
1122 sk->sk_ack_backlog = 0;
1123 sk->sk_state = BT_LISTEN;
1125 done:
1126 release_sock(sk);
1127 return err;
1130 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1132 DECLARE_WAITQUEUE(wait, current);
1133 struct sock *sk = sock->sk, *nsk;
1134 long timeo;
1135 int err = 0;
1137 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1139 if (sk->sk_state != BT_LISTEN) {
1140 err = -EBADFD;
1141 goto done;
1144 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1146 BT_DBG("sk %p timeo %ld", sk, timeo);
1148 /* Wait for an incoming connection. (wake-one). */
1149 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1150 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1151 set_current_state(TASK_INTERRUPTIBLE);
1152 if (!timeo) {
1153 err = -EAGAIN;
1154 break;
1157 release_sock(sk);
1158 timeo = schedule_timeout(timeo);
1159 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1161 if (sk->sk_state != BT_LISTEN) {
1162 err = -EBADFD;
1163 break;
1166 if (signal_pending(current)) {
1167 err = sock_intr_errno(timeo);
1168 break;
1171 set_current_state(TASK_RUNNING);
1172 remove_wait_queue(sk->sk_sleep, &wait);
1174 if (err)
1175 goto done;
1177 newsock->state = SS_CONNECTED;
1179 BT_DBG("new socket %p", nsk);
1181 done:
1182 release_sock(sk);
1183 return err;
1186 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1188 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1189 struct sock *sk = sock->sk;
1191 BT_DBG("sock %p, sk %p", sock, sk);
1193 addr->sa_family = AF_BLUETOOTH;
1194 *len = sizeof(struct sockaddr_l2);
1196 if (peer) {
1197 la->l2_psm = l2cap_pi(sk)->psm;
1198 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1199 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1200 } else {
1201 la->l2_psm = l2cap_pi(sk)->sport;
1202 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1203 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1206 return 0;
1209 static void l2cap_monitor_timeout(unsigned long arg)
1211 struct sock *sk = (void *) arg;
1212 u16 control;
1214 bh_lock_sock(sk);
1215 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1216 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1217 bh_unlock_sock(sk);
1218 return;
1221 l2cap_pi(sk)->retry_count++;
1222 __mod_monitor_timer();
1224 control = L2CAP_CTRL_POLL;
1225 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1226 bh_unlock_sock(sk);
1229 static void l2cap_retrans_timeout(unsigned long arg)
1231 struct sock *sk = (void *) arg;
1232 u16 control;
1234 bh_lock_sock(sk);
1235 l2cap_pi(sk)->retry_count = 1;
1236 __mod_monitor_timer();
1238 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1240 control = L2CAP_CTRL_POLL;
1241 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1242 bh_unlock_sock(sk);
1245 static void l2cap_drop_acked_frames(struct sock *sk)
1247 struct sk_buff *skb;
1249 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1250 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1251 break;
1253 skb = skb_dequeue(TX_QUEUE(sk));
1254 kfree_skb(skb);
1256 l2cap_pi(sk)->unacked_frames--;
1259 if (!l2cap_pi(sk)->unacked_frames)
1260 del_timer(&l2cap_pi(sk)->retrans_timer);
1262 return;
1265 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1267 struct l2cap_pinfo *pi = l2cap_pi(sk);
1268 int err;
1270 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1272 err = hci_send_acl(pi->conn->hcon, skb, 0);
1273 if (err < 0)
1274 kfree_skb(skb);
1276 return err;
1279 static int l2cap_streaming_send(struct sock *sk)
1281 struct sk_buff *skb, *tx_skb;
1282 struct l2cap_pinfo *pi = l2cap_pi(sk);
1283 u16 control, fcs;
1284 int err;
1286 while ((skb = sk->sk_send_head)) {
1287 tx_skb = skb_clone(skb, GFP_ATOMIC);
1289 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1290 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1291 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1293 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1294 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1295 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1298 err = l2cap_do_send(sk, tx_skb);
1299 if (err < 0) {
1300 l2cap_send_disconn_req(pi->conn, sk);
1301 return err;
1304 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1306 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1307 sk->sk_send_head = NULL;
1308 else
1309 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1311 skb = skb_dequeue(TX_QUEUE(sk));
1312 kfree_skb(skb);
1314 return 0;
1317 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1319 struct l2cap_pinfo *pi = l2cap_pi(sk);
1320 struct sk_buff *skb, *tx_skb;
1321 u16 control, fcs;
1322 int err;
1324 skb = skb_peek(TX_QUEUE(sk));
1325 do {
1326 if (bt_cb(skb)->tx_seq != tx_seq) {
1327 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1328 break;
1329 skb = skb_queue_next(TX_QUEUE(sk), skb);
1330 continue;
1333 if (pi->remote_max_tx &&
1334 bt_cb(skb)->retries == pi->remote_max_tx) {
1335 l2cap_send_disconn_req(pi->conn, sk);
1336 break;
1339 tx_skb = skb_clone(skb, GFP_ATOMIC);
1340 bt_cb(skb)->retries++;
1341 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1342 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1343 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1344 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1346 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1347 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1348 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1351 err = l2cap_do_send(sk, tx_skb);
1352 if (err < 0) {
1353 l2cap_send_disconn_req(pi->conn, sk);
1354 return err;
1356 break;
1357 } while(1);
1358 return 0;
1361 static int l2cap_ertm_send(struct sock *sk)
1363 struct sk_buff *skb, *tx_skb;
1364 struct l2cap_pinfo *pi = l2cap_pi(sk);
1365 u16 control, fcs;
1366 int err;
1368 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1369 return 0;
1371 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1372 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1374 if (pi->remote_max_tx &&
1375 bt_cb(skb)->retries == pi->remote_max_tx) {
1376 l2cap_send_disconn_req(pi->conn, sk);
1377 break;
1380 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382 bt_cb(skb)->retries++;
1384 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1385 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1386 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1387 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1390 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1391 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1392 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1395 err = l2cap_do_send(sk, tx_skb);
1396 if (err < 0) {
1397 l2cap_send_disconn_req(pi->conn, sk);
1398 return err;
1400 __mod_retrans_timer();
1402 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1403 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1405 pi->unacked_frames++;
1407 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1408 sk->sk_send_head = NULL;
1409 else
1410 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1413 return 0;
1416 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1418 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1419 struct sk_buff **frag;
1420 int err, sent = 0;
1422 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1423 return -EFAULT;
1426 sent += count;
1427 len -= count;
1429 /* Continuation fragments (no L2CAP header) */
1430 frag = &skb_shinfo(skb)->frag_list;
1431 while (len) {
1432 count = min_t(unsigned int, conn->mtu, len);
1434 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1435 if (!*frag)
1436 return -EFAULT;
1437 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1438 return -EFAULT;
1440 sent += count;
1441 len -= count;
1443 frag = &(*frag)->next;
1446 return sent;
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header followed
 * by the 2-byte PSM, then the user payload copied from msg (extra data
 * goes into frag_list fragments).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* first skb carries header plus up to one MTU worth of payload */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload copied from msg (extra data goes into frag_list fragments).
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* first skb carries header plus up to one MTU worth of payload */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build one ERTM/streaming I-frame.
 * Layout: L2CAP header, 16-bit control field, optional 16-bit SDU length
 * (present only on the first segment of a segmented SDU), payload, and a
 * zeroed FCS placeholder when CRC16 is in use (the real FCS is filled in
 * at transmit time).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (sdulen)
		hlen += 2;	/* room for the SDU length field */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; computed just before transmission */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
/* Segment an SDU larger than max_pdu_size into a chain of I-frames
 * (START, CONTINUE..., END), queue them on the transmit queue and
 * return the total payload size queued, or a negative errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	/* build on a private queue first, so a mid-SDU allocation failure
	 * leaves TX_QUEUE untouched */
	__skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* the START segment carries the total SDU length */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->max_pdu_size;
	size += pi->max_pdu_size;
	control = 0;

	while (len > 0) {
		size_t buflen;

		if (len > pi->max_pdu_size) {
			control |= L2CAP_SDU_CONTINUE;
			buflen = pi->max_pdu_size;
		} else {
			control |= L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
		control = 0;
	}
	/* splice the whole SDU onto the socket transmit queue at once;
	 * sar_queue.next still points at the first frame just spliced in */
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1596 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1598 struct sock *sk = sock->sk;
1599 struct l2cap_pinfo *pi = l2cap_pi(sk);
1600 struct sk_buff *skb;
1601 u16 control;
1602 int err;
1604 BT_DBG("sock %p, sk %p", sock, sk);
1606 err = sock_error(sk);
1607 if (err)
1608 return err;
1610 if (msg->msg_flags & MSG_OOB)
1611 return -EOPNOTSUPP;
1613 /* Check outgoing MTU */
1614 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
1615 len > pi->omtu)
1616 return -EINVAL;
1618 lock_sock(sk);
1620 if (sk->sk_state != BT_CONNECTED) {
1621 err = -ENOTCONN;
1622 goto done;
1625 /* Connectionless channel */
1626 if (sk->sk_type == SOCK_DGRAM) {
1627 skb = l2cap_create_connless_pdu(sk, msg, len);
1628 err = l2cap_do_send(sk, skb);
1629 goto done;
1632 switch (pi->mode) {
1633 case L2CAP_MODE_BASIC:
1634 /* Create a basic PDU */
1635 skb = l2cap_create_basic_pdu(sk, msg, len);
1636 if (IS_ERR(skb)) {
1637 err = PTR_ERR(skb);
1638 goto done;
1641 err = l2cap_do_send(sk, skb);
1642 if (!err)
1643 err = len;
1644 break;
1646 case L2CAP_MODE_ERTM:
1647 case L2CAP_MODE_STREAMING:
1648 /* Entire SDU fits into one PDU */
1649 if (len <= pi->max_pdu_size) {
1650 control = L2CAP_SDU_UNSEGMENTED;
1651 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1652 if (IS_ERR(skb)) {
1653 err = PTR_ERR(skb);
1654 goto done;
1656 __skb_queue_tail(TX_QUEUE(sk), skb);
1657 if (sk->sk_send_head == NULL)
1658 sk->sk_send_head = skb;
1659 } else {
1660 /* Segment SDU into multiples PDUs */
1661 err = l2cap_sar_segment_sdu(sk, msg, len);
1662 if (err < 0)
1663 goto done;
1666 if (pi->mode == L2CAP_MODE_STREAMING)
1667 err = l2cap_streaming_send(sk);
1668 else
1669 err = l2cap_ertm_send(sk);
1671 if (!err)
1672 err = len;
1673 break;
1675 default:
1676 BT_DBG("bad state %1.1x", pi->mode);
1677 err = -EINVAL;
1680 done:
1681 release_sock(sk);
1682 return err;
1685 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1687 struct sock *sk = sock->sk;
1689 lock_sock(sk);
1691 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1692 struct l2cap_conn_rsp rsp;
1694 sk->sk_state = BT_CONFIG;
1696 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1697 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1698 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1699 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1700 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1701 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1703 release_sock(sk);
1704 return 0;
1707 release_sock(sk);
1709 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP option setter (L2CAP_OPTIONS, L2CAP_LM). */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* seed with current values so a short copy_from_user()
		 * leaves the remaining fields unchanged */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* NOTE(review): flush_to is copied in from userspace but
		 * never written back to the socket here -- confirm that is
		 * intentional. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->mode = opts.mode;
		l2cap_pi(sk)->fcs = opts.fcs;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* highest requested security level wins */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* SOL_BLUETOOTH option setter (BT_SECURITY, BT_DEFER_SETUP); SOL_L2CAP
 * is routed to the legacy handler for backward compatibility.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		/* only meaningful for connection-oriented and raw sockets */
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;	/* default if user buffer is short */

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* may only be changed before the socket is connected */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* Legacy SOL_L2CAP option getter (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* reconstruct the link-mode bit mask from the stored
		 * security level */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		/* valid while connected, or parked in deferred setup */
		if (sk->sk_state != BT_CONNECTED &&
				!(sk->sk_state == BT_CONNECT2 &&
					bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* SOL_BLUETOOTH option getter (BT_SECURITY, BT_DEFER_SETUP); SOL_L2CAP
 * is routed to the legacy handler for backward compatibility.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		/* only meaningful for connection-oriented and raw sockets */
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* shutdown() entry point.  Closes the channel in both directions (the
 * 'how' argument is ignored) and, with SO_LINGER set, waits for the
 * socket to reach BT_CLOSED up to the configured linger time.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* any shutdown tears down both directions */
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
1993 static int l2cap_sock_release(struct socket *sock)
1995 struct sock *sk = sock->sk;
1996 int err;
1998 BT_DBG("sock %p, sk %p", sock, sk);
2000 if (!sk)
2001 return 0;
2003 err = l2cap_sock_shutdown(sock, 2);
2005 sock_orphan(sk);
2006 l2cap_sock_kill(sk);
2007 return err;
2010 static void l2cap_chan_ready(struct sock *sk)
2012 struct sock *parent = bt_sk(sk)->parent;
2014 BT_DBG("sk %p, parent %p", sk, parent);
2016 l2cap_pi(sk)->conf_state = 0;
2017 l2cap_sock_clear_timer(sk);
2019 if (!parent) {
2020 /* Outgoing channel.
2021 * Wake up socket sleeping on connect.
2023 sk->sk_state = BT_CONNECTED;
2024 sk->sk_state_change(sk);
2025 } else {
2026 /* Incoming channel.
2027 * Wake up socket sleeping on accept.
2029 parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;	/* best effort: skip this socket on OOM */

		/* clone is freed here if the receive queue rejects it */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2060 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU: L2CAP header (CID 0x0001),
 * command header, then dlen bytes of payload.  Payload beyond the
 * connection MTU is split into frag_list continuation fragments.
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* fill the rest of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;	/* bytes of payload still to be copied */

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* frees the head skb together with any fragments already chained */
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option (TLV) at *ptr and advance *ptr past it.
 * 1/2/4-byte values are returned converted from little-endian in *val;
 * any other length returns a pointer to the raw option data in *val.
 * Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes straight off the wire and is not checked
 * against the remaining buffer length here -- callers must bound the
 * parse loop; verify all call sites do.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option (TLV) at *ptr and advance *ptr.
 * 1/2/4-byte values are stored little-endian; any other length treats
 * val as a pointer to a raw buffer and copies it verbatim.  The caller
 * must guarantee the destination buffer has room.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Reset the per-channel ERTM state: sequence counters, the retransmit
 * and monitor timers, and the selective-reject queue.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_to_ack = 0;

	/* timers fire with the socket pointer as their argument */
	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
}
2198 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2200 u32 local_feat_mask = l2cap_feat_mask;
2201 if (enable_ertm)
2202 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2204 switch (mode) {
2205 case L2CAP_MODE_ERTM:
2206 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2207 case L2CAP_MODE_STREAMING:
2208 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2209 default:
2210 return 0x00;
2214 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2216 switch (mode) {
2217 case L2CAP_MODE_STREAMING:
2218 case L2CAP_MODE_ERTM:
2219 if (l2cap_mode_supported(mode, remote_feat_mask))
2220 return mode;
2221 /* fall through */
2222 default:
2223 return L2CAP_MODE_BASIC;
/* Build our Configure Request into 'data' and return its length.
 * On the first exchange this also settles the channel mode (keeping a
 * caller-forced ERTM/streaming mode, or negotiating one down from the
 * peer's feature mask).  For ERTM/streaming an RFC option is emitted,
 * plus an FCS option when the peer supports FCS and we want none.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* only pick the mode once, on the very first request/response */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* mode was explicitly requested: insist on it */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* only the MTU needs announcing, and only if non-default */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
		rfc.max_transmit = max_transmit;
		rfc.retrans_timeout = 0;	/* timeouts are set by the acceptor */
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* request "no FCS" when we or the peer prefer none */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;		/* no window/retransmit in streaming */
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
/* Parse the peer's buffered Configure Request (pi->conf_req/conf_len)
 * and build our Configure Response into 'data'.  Returns the response
 * length, or -ECONNREFUSED when mode negotiation cannot converge.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* first pass: collect the options the peer sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;	/* accepted but ignored */

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			if (hint)
				break;	/* hints may be silently ignored */

			/* echo unknown non-hint option types back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* only settle the mode on the very first exchange */
	if (pi->num_conf_rsp || pi->num_conf_req)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* mode was explicitly requested: insist on it */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			return -ECONNREFUSED;
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->mode != rfc.mode) {
		/* counter-propose our mode; give up if the peer already
		 * rejected one counter-proposal */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			/* NOTE(review): stored without le16_to_cpu() here,
			 * unlike l2cap_parse_conf_rsp() -- verify endianness
			 * handling of max_pdu_size. */
			pi->max_pdu_size = rfc.max_pdu_size;

			/* acceptor side supplies the timeout values */
			rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
			rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_tx_win = rfc.txwin_size;
			pi->max_pdu_size = rfc.max_pdu_size;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
2454 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2456 struct l2cap_pinfo *pi = l2cap_pi(sk);
2457 struct l2cap_conf_req *req = data;
2458 void *ptr = req->data;
2459 int type, olen;
2460 unsigned long val;
2461 struct l2cap_conf_rfc rfc;
2463 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2465 while (len >= L2CAP_CONF_OPT_SIZE) {
2466 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2468 switch (type) {
2469 case L2CAP_CONF_MTU:
2470 if (val < L2CAP_DEFAULT_MIN_MTU) {
2471 *result = L2CAP_CONF_UNACCEPT;
2472 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2473 } else
2474 pi->omtu = val;
2475 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2476 break;
2478 case L2CAP_CONF_FLUSH_TO:
2479 pi->flush_to = val;
2480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2481 2, pi->flush_to);
2482 break;
2484 case L2CAP_CONF_RFC:
2485 if (olen == sizeof(rfc))
2486 memcpy(&rfc, (void *)val, olen);
2488 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2489 rfc.mode != pi->mode)
2490 return -ECONNREFUSED;
2492 pi->mode = rfc.mode;
2493 pi->fcs = 0;
2495 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2496 sizeof(rfc), (unsigned long) &rfc);
2497 break;
2501 if (*result == L2CAP_CONF_SUCCESS) {
2502 switch (rfc.mode) {
2503 case L2CAP_MODE_ERTM:
2504 pi->remote_tx_win = rfc.txwin_size;
2505 pi->retrans_timeout = rfc.retrans_timeout;
2506 pi->monitor_timeout = rfc.monitor_timeout;
2507 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2508 break;
2509 case L2CAP_MODE_STREAMING:
2510 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2511 break;
2515 req->dcid = cpu_to_le16(pi->dcid);
2516 req->flags = cpu_to_le16(0x0000);
2518 return ptr - data;
2521 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2523 struct l2cap_conf_rsp *rsp = data;
2524 void *ptr = rsp->data;
2526 BT_DBG("sk %p", sk);
2528 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2529 rsp->result = cpu_to_le16(result);
2530 rsp->flags = cpu_to_le16(flags);
2532 return ptr - data;
2535 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2537 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2539 if (rej->reason != 0x0000)
2540 return 0;
2542 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2543 cmd->ident == conn->info_ident) {
2544 del_timer(&conn->info_timer);
2546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2547 conn->info_ident = 0;
2549 l2cap_conn_start(conn);
2552 return 0;
/* Handle an incoming Connect Request: find a listener on the PSM, run
 * the security/backlog checks, create and register the child socket,
 * and send the Connect Response (possibly "pending").  When the peer's
 * feature mask is not yet known, an Information Request is also issued.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	/* NOTE(review): the lookup appears to return with 'parent'
	 * bh-locked -- it is released at the response: label; confirm
	 * against l2cap_get_sock_by_psm(). */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* authentication failure */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* initialize the child from the listening parent */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;	/* our CID, allocated by chan_add */

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* wait for userspace to accept() first */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* security procedures still outstanding */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* feature mask exchange still outstanding */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* kick off the feature-mask exchange if it has not been done yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
/* Handle a Connect Response from the peer: on success move the channel
 * into configuration and send our Configure Request; on "pending" just
 * record the state; on any error tear the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* look the channel up by our CID or, while still pending, by the
	 * signalling identifier.
	 * NOTE(review): both lookups appear to return with the socket
	 * bh-locked (released at the bottom) -- confirm against their
	 * definitions. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* connection accepted: proceed to configuration */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming L2CAP Configure Request.
 *
 * Option data may be split over several requests (continuation bit 0 of
 * 'flags'); fragments are accumulated in conf_req until the final one,
 * then parsed and answered as a whole.  Once both directions have been
 * configured, the channel moves to BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unrecoverable option error: drop the channel. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: bring the channel up.
		 * Enable CRC16 FCS unless both sides opted out. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not sent our own Configure Request yet; do so now. */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming L2CAP Configure Response.
 *
 * UNACCEPT triggers renegotiation, bounded by L2CAP_CONF_MAX_CONF_RSP
 * attempts; any other failure disconnects the channel.  When both
 * directions are configured the channel goes to BT_CONNECTED.
 *
 * NOTE(review): 'cmd->len' is used below without le16_to_cpu(), unlike
 * everywhere else in this file -- looks like a big-endian bug; confirm.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			int len = cmd->len - sizeof(*rsp);
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			l2cap_pi(sk)->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through: too many renegotiation attempts */

	default:
		/* Failure: give the peer 5s to answer our disconnect. */
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk);
		goto done;
	}

	if (flags & 0x01)
		goto done;	/* continuation: more responses to come */

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		/* Enable CRC16 FCS unless both sides opted out. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming L2CAP Disconnect Request: acknowledge it, purge
 * pending data and ERTM state, then delete and kill the channel. */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's destination CID is our source CID. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* ERTM keeps extra state: buffered SREJ frames and the
		 * retransmission/monitor timers. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming L2CAP Disconnect Response: the peer confirmed our
 * disconnect request, so purge remaining state and kill the channel
 * (error code 0 -- this was a local, orderly shutdown). */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop ERTM retransmission state along with the channel. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming L2CAP Information Request and answer it inline.
 * Supported queries: extended feature mask and fixed channel map;
 * anything else is answered with NOTSUPP. */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 32-bit feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (enable_ertm)
			/* Advertise ERTM/streaming/FCS only when enabled. */
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the feature-mask / fixed-channel discovery started at
 * connection setup: a feature mask that advertises fixed channels
 * triggers a follow-up query; once discovery finishes, pending
 * channels are started via l2cap_conn_start(). */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query the map too. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Parse the L2CAP signalling channel (CID 0x0001).
 *
 * The payload is a sequence of commands, each with its own header.
 * Unknown commands or handler failures are answered with a Command
 * Reject.  Consumes 'skb'. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A zero ident or a length past the end of the packet means
		 * a corrupted command stream; stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
/* Verify (and strip) the CRC16 FCS trailer of a received frame.
 *
 * Returns 0 on success or when no FCS is in use, -EINVAL on mismatch.
 * Subtle but correct ordering: skb_trim() only shrinks skb->len, so
 * the two FCS bytes remain readable at skb->data + skb->len after the
 * trim.  The CRC covers the L2CAP header (hdr_size before skb->data)
 * plus the remaining payload. */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EINVAL;
	}
	return 0;
}
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq so the gap-fill code can walk it in order. */
static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	/* Stash sequence number and SAR bits in the skb control block. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return;
	}

	/* Walk until the first entry with a larger sequence number and
	 * insert in front of it; otherwise append at the tail. */
	do {
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);
}
3169 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3171 struct l2cap_pinfo *pi = l2cap_pi(sk);
3172 struct sk_buff *_skb;
3173 int err = -EINVAL;
3175 switch (control & L2CAP_CTRL_SAR) {
3176 case L2CAP_SDU_UNSEGMENTED:
3177 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3178 kfree_skb(pi->sdu);
3179 break;
3182 err = sock_queue_rcv_skb(sk, skb);
3183 if (!err)
3184 return 0;
3186 break;
3188 case L2CAP_SDU_START:
3189 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3190 kfree_skb(pi->sdu);
3191 break;
3194 pi->sdu_len = get_unaligned_le16(skb->data);
3195 skb_pull(skb, 2);
3197 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3198 if (!pi->sdu) {
3199 err = -ENOMEM;
3200 break;
3203 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3205 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3206 pi->partial_sdu_len = skb->len;
3207 err = 0;
3208 break;
3210 case L2CAP_SDU_CONTINUE:
3211 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3212 break;
3214 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3216 pi->partial_sdu_len += skb->len;
3217 if (pi->partial_sdu_len > pi->sdu_len)
3218 kfree_skb(pi->sdu);
3219 else
3220 err = 0;
3222 break;
3224 case L2CAP_SDU_END:
3225 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3226 break;
3228 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3230 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3231 pi->partial_sdu_len += skb->len;
3233 if (pi->partial_sdu_len == pi->sdu_len) {
3234 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3235 err = sock_queue_rcv_skb(sk, _skb);
3236 if (err < 0)
3237 kfree_skb(_skb);
3239 kfree_skb(pi->sdu);
3240 err = 0;
3242 break;
3245 kfree_skb(skb);
3246 return err;
/* Deliver consecutively-numbered frames buffered in the SREJ queue,
 * starting at tx_seq, now that the gap before them has been filled. */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control = 0;

	while((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Queue is sorted; stop at the first hole. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_sar_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq++;
	}
}
/* The peer retransmitted the frame we selectively rejected as tx_seq:
 * drop its entry from the SREJ list.  Entries in front of it are
 * re-SREJed and rotated to the back of the list. */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3287 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3289 struct l2cap_pinfo *pi = l2cap_pi(sk);
3290 struct srej_list *new;
3291 u16 control;
3293 while (tx_seq != pi->expected_tx_seq) {
3294 control = L2CAP_SUPER_SELECT_REJECT;
3295 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3296 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3297 control |= L2CAP_CTRL_POLL;
3298 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3300 l2cap_send_sframe(pi, control);
3302 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3303 new->tx_seq = pi->expected_tx_seq++;
3304 list_add_tail(&new->list, SREJ_LIST(sk));
3306 pi->expected_tx_seq++;
/* Process a received ERTM I-frame.
 *
 * In-sequence frames go straight to SAR reassembly; out-of-sequence
 * frames are buffered in the SREJ queue and trigger Selective Reject
 * recovery.  The piggy-backed req_seq acknowledges our transmitted
 * frames in both cases. */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* Frames up to req_seq are acknowledged by this I-frame. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest outstanding SREJ satisfied: buffer the frame
			 * and flush any now-consecutive buffered frames. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* Recovery complete. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					/* Retransmission of a frame we already
					 * SREJed out of order. */
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			/* A new gap opened beyond the known ones. */
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Still recovering: keep ordering via the SREJ queue. */
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else {
			/* F-bit answer to our poll: retransmit from the
			 * acknowledged point. */
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);
		}
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Send an RR acknowledgement every NUM_TO_ACK frames. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
/* Process a received ERTM S-frame (RR, REJ, SREJ, RNR).
 *
 * S-frames carry acknowledgements and flow-control/retransmission
 * requests; 'tx_seq' below is actually the req_seq (acknowledgement)
 * field of the control word. */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		if (rx_control & L2CAP_CTRL_POLL) {
			/* Peer polls us: answer with RR + F-bit. */
			u16 control = L2CAP_CTRL_FINAL;
			control |= L2CAP_SUPER_RCV_READY |
				(pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
			l2cap_send_sframe(l2cap_pi(sk), control);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Answer to our poll. */
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				/* Retransmit everything not yet acked. */
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}

			if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
				break;

			pi->conn_state &= ~L2CAP_CONN_WAIT_F;
			del_timer(&pi->monitor_timer);

			if (pi->unacked_frames > 0)
				__mod_retrans_timer();
		} else {
			/* Plain acknowledgement. */
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;

	case L2CAP_SUPER_REJECT:
		/* Go-back-N request: retransmit from req_seq. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		pi->expected_ack_seq = __get_reqseq(rx_control);
		l2cap_drop_acked_frames(sk);

		if (rx_control & L2CAP_CTRL_FINAL) {
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}
		} else {
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);

			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_REJ_ACT;
			}
		}

		break;

	case L2CAP_SUPER_SELECT_REJECT:
		/* Peer asks for one specific frame again. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (rx_control & L2CAP_CTRL_POLL) {
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);
			l2cap_retransmit_frame(sk, tx_seq);
			l2cap_ertm_send(sk);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Skip the retransmit if this SREJ was already
			 * acted upon while polling. */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
					pi->srej_save_reqseq == tx_seq)
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			else
				l2cap_retransmit_frame(sk, tx_seq);
		}
		else {
			l2cap_retransmit_frame(sk, tx_seq);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		}
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		/* Peer is busy: stop retransmitting until it recovers. */
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		del_timer(&l2cap_pi(sk)->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL) {
			u16 control = L2CAP_CTRL_FINAL;
			l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
		}
		break;
	}

	return 0;
}
/* Dispatch an inbound frame on a dynamic (data) channel.
 *
 * The CID lookup appears to return the socket bh-locked (released at
 * 'done') -- confirm against l2cap_get_chan_by_scid.  Basic mode queues
 * the payload directly; ERTM and streaming strip the 16-bit control
 * field, validate length and FCS, then hand off to the frame handlers. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* Information payload excludes the SAR length field... */
		if (__is_sar_start(control))
			len -= 2;

		/* ...and the FCS trailer, when present. */
		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control))
			l2cap_data_channel_iframe(sk, control, skb);
		else
			l2cap_data_channel_sframe(sk, control, skb);

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming never retransmits: oversized frames and
		 * S-frames are simply dropped. */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Accept whatever sequence number arrives; just resync
		 * the expected counter on a mismatch. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = tx_seq + 1;

		l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless-channel (CID 0x0002) datagram to the socket
 * bound to the given PSM, honouring its MTU.  Consumes 'skb'. */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Demultiplex one complete L2CAP frame to the proper channel handler.
 * Note that 'lh' stays valid after skb_pull(): pull only advances
 * skb->data, the header bytes are still in the buffer. */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		/* Header/payload length mismatch: discard the frame. */
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless data starts with a 16-bit PSM. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3691 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection?
 *
 * Scans listening L2CAP sockets; an exact local-address match takes
 * precedence over BDADDR_ANY wildcards.  Returns the accumulated
 * HCI link-mode flags (0 = do not accept). */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Exact match on the local adapter address. */
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener: used only if no exact match. */
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
3726 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3728 struct l2cap_conn *conn;
3730 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3732 if (hcon->type != ACL_LINK)
3733 return 0;
3735 if (!status) {
3736 conn = l2cap_conn_add(hcon, status);
3737 if (conn)
3738 l2cap_conn_ready(conn);
3739 } else
3740 l2cap_conn_del(hcon, bt_err(status));
3742 return 0;
3745 static int l2cap_disconn_ind(struct hci_conn *hcon)
3747 struct l2cap_conn *conn = hcon->l2cap_data;
3749 BT_DBG("hcon %p", hcon);
3751 if (hcon->type != ACL_LINK || !conn)
3752 return 0x13;
3754 return conn->disc_reason;
3757 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3759 BT_DBG("hcon %p reason %d", hcon, reason);
3761 if (hcon->type != ACL_LINK)
3762 return 0;
3764 l2cap_conn_del(hcon, bt_err(reason));
3766 return 0;
3769 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3771 if (sk->sk_type != SOCK_SEQPACKET)
3772 return;
3774 if (encrypt == 0x00) {
3775 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3776 l2cap_sock_clear_timer(sk);
3777 l2cap_sock_set_timer(sk, HZ * 5);
3778 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3779 __l2cap_sock_close(sk, ECONNREFUSED);
3780 } else {
3781 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3782 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption procedure finished.
 *
 * Walks every channel on the connection and advances those that were
 * waiting on security: BT_CONNECT channels send their Connect Request,
 * BT_CONNECT2 channels answer the peer's pending request.  Channels
 * already connected/configuring get their encryption state rechecked. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Still waiting on the connection itself; skip. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * Connect Request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer to tear down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 *
 * An ACL_START fragment carries the L2CAP header (and possibly a whole
 * frame); continuation fragments are appended to conn->rx_skb until
 * conn->rx_len reaches zero, then the frame is dispatched.  Any length
 * inconsistency marks the connection unreliable.  Consumes 'skb'. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* Previous frame never completed; discard it. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			/* Not even a complete length field. */
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			/* Continuation without a pending start. */
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
/* debugfs: print one line per L2CAP socket (addresses, state, PSM,
 * CIDs, MTUs and security level). */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
/* debugfs open hook: stateless seq_file, all output comes from
 * l2cap_debugfs_show(). */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* Read-only seq_file operations backing the "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the "l2cap" debugfs file; NULL when creation was skipped or failed. */
static struct dentry *l2cap_debugfs;
/*
 * Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * poll/ioctl/mmap/socketpair fall back to the common Bluetooth or
 * sock_no_* helpers; the rest are L2CAP-specific.
 */
static const struct proto_ops l2cap_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = l2cap_sock_release,
	.bind = l2cap_sock_bind,
	.connect = l2cap_sock_connect,
	.listen = l2cap_sock_listen,
	.accept = l2cap_sock_accept,
	.getname = l2cap_sock_getname,
	.sendmsg = l2cap_sock_sendmsg,
	.recvmsg = l2cap_sock_recvmsg,
	.poll = bt_sock_poll,
	.ioctl = bt_sock_ioctl,
	.mmap = sock_no_mmap,
	.socketpair = sock_no_socketpair,
	.shutdown = l2cap_sock_shutdown,
	.setsockopt = l2cap_sock_setsockopt,
	.getsockopt = l2cap_sock_getsockopt
};
/* Protocol-family hook: creates L2CAP sockets for socket(PF_BLUETOOTH, ...). */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.create = l2cap_sock_create,
};
/*
 * HCI-core callbacks registered for L2CAP: connection indication and
 * confirmation, disconnection, security events and inbound ACL data.
 */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
/*
 * Module init: register the L2CAP proto with the core socket layer,
 * then the Bluetooth socket family, then the HCI protocol hooks, and
 * finally create the optional debugfs entry.
 *
 * Returns 0 on success or a negative errno; every registration made
 * before a failure is undone again before returning.
 */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		/* undo the socket registration before the common unwind below */
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			/* debugfs is diagnostics only — log but keep going */
			BT_ERR("Failed to create L2CAP debug file");
	}

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}
/*
 * Module exit: tear down everything l2cap_init() set up.
 * debugfs_remove() accepts a NULL dentry, so a missing debugfs file
 * needs no special-casing.
 */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
/*
 * Dummy function to trigger automatic L2CAP module loading by other
 * modules that use L2CAP sockets but don't use any other symbols from
 * it.
 *
 * Intentionally empty; the bare "return;" the original carried is
 * redundant at the end of a void function (checkpatch flags it).
 */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable knobs exposed under /sys/module/l2cap/parameters/ */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Alias for socket-triggered autoloading (bt-proto-<protocol number>) */
MODULE_ALIAS("bt-proto-0");