net/bluetooth/l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
58 #define VERSION "2.9"
60 static u32 l2cap_feat_mask = 0x0000;
62 static const struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
75 /* ---- L2CAP timers ---- */
76 static void l2cap_sock_timeout(unsigned long arg)
78 struct sock *sk = (struct sock *) arg;
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
82 bh_lock_sock(sk);
83 __l2cap_sock_close(sk, ETIMEDOUT);
84 bh_unlock_sock(sk);
86 l2cap_sock_kill(sk);
87 sock_put(sk);
90 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
92 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
93 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
96 static void l2cap_sock_clear_timer(struct sock *sk)
98 BT_DBG("sock %p state %d", sk, sk->sk_state);
99 sk_stop_timer(sk, &sk->sk_timer);
102 /* ---- L2CAP channels ---- */
103 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
105 struct sock *s;
106 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
107 if (l2cap_pi(s)->dcid == cid)
108 break;
110 return s;
113 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->scid == cid)
118 break;
120 return s;
123 /* Find channel with given SCID.
124 * Returns locked socket */
125 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
127 struct sock *s;
128 read_lock(&l->lock);
129 s = __l2cap_get_chan_by_scid(l, cid);
130 if (s) bh_lock_sock(s);
131 read_unlock(&l->lock);
132 return s;
135 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
137 struct sock *s;
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->ident == ident)
140 break;
142 return s;
145 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
147 struct sock *s;
148 read_lock(&l->lock);
149 s = __l2cap_get_chan_by_ident(l, ident);
150 if (s) bh_lock_sock(s);
151 read_unlock(&l->lock);
152 return s;
155 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
157 u16 cid = 0x0040;
159 for (; cid < 0xffff; cid++) {
160 if (!__l2cap_get_chan_by_scid(l, cid))
161 return cid;
164 return 0;
167 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
169 sock_hold(sk);
171 if (l->head)
172 l2cap_pi(l->head)->prev_c = sk;
174 l2cap_pi(sk)->next_c = l->head;
175 l2cap_pi(sk)->prev_c = NULL;
176 l->head = sk;
179 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
181 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
183 write_lock_bh(&l->lock);
184 if (sk == l->head)
185 l->head = next;
187 if (next)
188 l2cap_pi(next)->prev_c = prev;
189 if (prev)
190 l2cap_pi(prev)->next_c = next;
191 write_unlock_bh(&l->lock);
193 __sock_put(sk);
196 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
198 struct l2cap_chan_list *l = &conn->chan_list;
200 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
202 l2cap_pi(sk)->conn = conn;
204 if (sk->sk_type == SOCK_SEQPACKET) {
205 /* Alloc CID for connection-oriented socket */
206 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
207 } else if (sk->sk_type == SOCK_DGRAM) {
208 /* Connectionless socket */
209 l2cap_pi(sk)->scid = 0x0002;
210 l2cap_pi(sk)->dcid = 0x0002;
211 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
212 } else {
213 /* Raw socket can send/recv signalling messages only */
214 l2cap_pi(sk)->scid = 0x0001;
215 l2cap_pi(sk)->dcid = 0x0001;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
219 __l2cap_chan_link(l, sk);
221 if (parent)
222 bt_accept_enqueue(parent, sk);
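/* For reference, the channel identifiers hard-coded above follow the L2CAP
 * CID conventions that the rest of this file relies on:
 *
 *	0x0001		signalling channel (raw sockets)
 *	0x0002		connectionless reception channel (SOCK_DGRAM)
 *	0x0040-0xffff	dynamically allocated, handed out by l2cap_alloc_cid()
 */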
225 /* Delete channel.
226 * Must be called on the locked socket. */
227 static void l2cap_chan_del(struct sock *sk, int err)
229 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
230 struct sock *parent = bt_sk(sk)->parent;
232 l2cap_sock_clear_timer(sk);
234 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
236 if (conn) {
237 /* Unlink from channel list */
238 l2cap_chan_unlink(&conn->chan_list, sk);
239 l2cap_pi(sk)->conn = NULL;
240 hci_conn_put(conn->hcon);
243 sk->sk_state = BT_CLOSED;
244 sock_set_flag(sk, SOCK_ZAPPED);
246 if (err)
247 sk->sk_err = err;
249 if (parent) {
250 bt_accept_unlink(sk);
251 parent->sk_data_ready(parent, 0);
252 } else
253 sk->sk_state_change(sk);
256 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
258 u8 id;
260 /* Get next available identifier.
261 * 1 - 128 are used by kernel.
262 * 129 - 199 are reserved.
263 * 200 - 254 are used by utilities like l2ping, etc.
266 spin_lock_bh(&conn->lock);
268 if (++conn->tx_ident > 128)
269 conn->tx_ident = 1;
271 id = conn->tx_ident;
273 spin_unlock_bh(&conn->lock);
275 return id;
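/* A short worked trace of the identifier allocator above: tx_ident starts at
 * zero on a new connection, so successive calls return 1, 2, ... 128 and then
 * wrap back to 1.  Identifier 0 is never handed out, which is why
 * l2cap_sig_channel() below treats ident == 0 as a corrupted command.
 */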
278 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
280 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
282 BT_DBG("code 0x%2.2x", code);
284 if (!skb)
285 return -ENOMEM;
287 return hci_send_acl(conn->hcon, skb, 0);
290 /* ---- L2CAP connections ---- */
291 static void l2cap_conn_start(struct l2cap_conn *conn)
293 struct l2cap_chan_list *l = &conn->chan_list;
294 struct sock *sk;
296 BT_DBG("conn %p", conn);
298 read_lock(&l->lock);
300 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
301 bh_lock_sock(sk);
303 if (sk->sk_type != SOCK_SEQPACKET) {
304 l2cap_sock_clear_timer(sk);
305 sk->sk_state = BT_CONNECTED;
306 sk->sk_state_change(sk);
307 } else if (sk->sk_state == BT_CONNECT) {
308 struct l2cap_conn_req req;
309 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
310 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
311 req.psm = l2cap_pi(sk)->psm;
312 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
313 L2CAP_CONN_REQ, sizeof(req), &req);
316 bh_unlock_sock(sk);
319 read_unlock(&l->lock);
322 static void l2cap_conn_ready(struct l2cap_conn *conn)
324 BT_DBG("conn %p", conn);
326 if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
327 struct l2cap_info_req req;
329 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
331 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
332 conn->info_ident = l2cap_get_ident(conn);
334 mod_timer(&conn->info_timer,
335 jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
337 l2cap_send_cmd(conn, conn->info_ident,
338 L2CAP_INFO_REQ, sizeof(req), &req);
342 /* Notify sockets that we cannot guarantee reliability anymore */
343 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
345 struct l2cap_chan_list *l = &conn->chan_list;
346 struct sock *sk;
348 BT_DBG("conn %p", conn);
350 read_lock(&l->lock);
352 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
353 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
354 sk->sk_err = err;
357 read_unlock(&l->lock);
360 static void l2cap_info_timeout(unsigned long arg)
362 struct l2cap_conn *conn = (void *) arg;
364 conn->info_ident = 0;
366 l2cap_conn_start(conn);
369 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
371 struct l2cap_conn *conn = hcon->l2cap_data;
373 if (conn || status)
374 return conn;
376 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
377 if (!conn)
378 return NULL;
380 hcon->l2cap_data = conn;
381 conn->hcon = hcon;
383 BT_DBG("hcon %p conn %p", hcon, conn);
385 conn->mtu = hcon->hdev->acl_mtu;
386 conn->src = &hcon->hdev->bdaddr;
387 conn->dst = &hcon->dst;
389 conn->feat_mask = 0;
391 setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long)conn);
393 spin_lock_init(&conn->lock);
394 rwlock_init(&conn->chan_list.lock);
396 return conn;
399 static void l2cap_conn_del(struct hci_conn *hcon, int err)
401 struct l2cap_conn *conn = hcon->l2cap_data;
402 struct sock *sk;
404 if (!conn)
405 return;
407 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
409 if (conn->rx_skb)
410 kfree_skb(conn->rx_skb);
412 /* Kill channels */
413 while ((sk = conn->chan_list.head)) {
414 bh_lock_sock(sk);
415 l2cap_chan_del(sk, err);
416 bh_unlock_sock(sk);
417 l2cap_sock_kill(sk);
420 hcon->l2cap_data = NULL;
421 kfree(conn);
424 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
426 struct l2cap_chan_list *l = &conn->chan_list;
427 write_lock_bh(&l->lock);
428 __l2cap_chan_add(conn, sk, parent);
429 write_unlock_bh(&l->lock);
432 /* ---- Socket interface ---- */
433 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
435 struct sock *sk;
436 struct hlist_node *node;
437 sk_for_each(sk, node, &l2cap_sk_list.head)
438 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
439 goto found;
440 sk = NULL;
441 found:
442 return sk;
445 /* Find socket with psm and source bdaddr.
446 * Returns closest match.
448 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
450 struct sock *sk = NULL, *sk1 = NULL;
451 struct hlist_node *node;
453 sk_for_each(sk, node, &l2cap_sk_list.head) {
454 if (state && sk->sk_state != state)
455 continue;
457 if (l2cap_pi(sk)->psm == psm) {
458 /* Exact match. */
459 if (!bacmp(&bt_sk(sk)->src, src))
460 break;
462 /* Closest match */
463 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
464 sk1 = sk;
467 return node ? sk : sk1;
470 /* Find socket with given address (psm, src).
471 * Returns locked socket */
472 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
474 struct sock *s;
475 read_lock(&l2cap_sk_list.lock);
476 s = __l2cap_get_sock_by_psm(state, psm, src);
477 if (s) bh_lock_sock(s);
478 read_unlock(&l2cap_sk_list.lock);
479 return s;
482 static void l2cap_sock_destruct(struct sock *sk)
484 BT_DBG("sk %p", sk);
486 skb_queue_purge(&sk->sk_receive_queue);
487 skb_queue_purge(&sk->sk_write_queue);
490 static void l2cap_sock_cleanup_listen(struct sock *parent)
492 struct sock *sk;
494 BT_DBG("parent %p", parent);
496 /* Close not yet accepted channels */
497 while ((sk = bt_accept_dequeue(parent, NULL)))
498 l2cap_sock_close(sk);
500 parent->sk_state = BT_CLOSED;
501 sock_set_flag(parent, SOCK_ZAPPED);
504 /* Kill socket (only if zapped and orphan)
505 * Must be called on unlocked socket.
507 static void l2cap_sock_kill(struct sock *sk)
509 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
510 return;
512 BT_DBG("sk %p state %d", sk, sk->sk_state);
514 /* Kill poor orphan */
515 bt_sock_unlink(&l2cap_sk_list, sk);
516 sock_set_flag(sk, SOCK_DEAD);
517 sock_put(sk);
520 static void __l2cap_sock_close(struct sock *sk, int reason)
522 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
524 switch (sk->sk_state) {
525 case BT_LISTEN:
526 l2cap_sock_cleanup_listen(sk);
527 break;
529 case BT_CONNECTED:
530 case BT_CONFIG:
531 case BT_CONNECT2:
532 if (sk->sk_type == SOCK_SEQPACKET) {
533 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
534 struct l2cap_disconn_req req;
536 sk->sk_state = BT_DISCONN;
537 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
539 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
540 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
541 l2cap_send_cmd(conn, l2cap_get_ident(conn),
542 L2CAP_DISCONN_REQ, sizeof(req), &req);
543 } else {
544 l2cap_chan_del(sk, reason);
546 break;
548 case BT_CONNECT:
549 case BT_DISCONN:
550 l2cap_chan_del(sk, reason);
551 break;
553 default:
554 sock_set_flag(sk, SOCK_ZAPPED);
555 break;
559 /* Must be called on unlocked socket. */
560 static void l2cap_sock_close(struct sock *sk)
562 l2cap_sock_clear_timer(sk);
563 lock_sock(sk);
564 __l2cap_sock_close(sk, ECONNRESET);
565 release_sock(sk);
566 l2cap_sock_kill(sk);
569 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
571 struct l2cap_pinfo *pi = l2cap_pi(sk);
573 BT_DBG("sk %p", sk);
575 if (parent) {
576 sk->sk_type = parent->sk_type;
577 pi->imtu = l2cap_pi(parent)->imtu;
578 pi->omtu = l2cap_pi(parent)->omtu;
579 pi->link_mode = l2cap_pi(parent)->link_mode;
580 } else {
581 pi->imtu = L2CAP_DEFAULT_MTU;
582 pi->omtu = 0;
583 pi->link_mode = 0;
586 /* Default config options */
587 pi->conf_len = 0;
588 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
591 static struct proto l2cap_proto = {
592 .name = "L2CAP",
593 .owner = THIS_MODULE,
594 .obj_size = sizeof(struct l2cap_pinfo)
597 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
599 struct sock *sk;
601 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
602 if (!sk)
603 return NULL;
605 sock_init_data(sock, sk);
606 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
608 sk->sk_destruct = l2cap_sock_destruct;
609 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
611 sock_reset_flag(sk, SOCK_ZAPPED);
613 sk->sk_protocol = proto;
614 sk->sk_state = BT_OPEN;
616 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long)sk);
618 bt_sock_link(&l2cap_sk_list, sk);
619 return sk;
622 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
624 struct sock *sk;
626 BT_DBG("sock %p", sock);
628 sock->state = SS_UNCONNECTED;
630 if (sock->type != SOCK_SEQPACKET &&
631 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
632 return -ESOCKTNOSUPPORT;
634 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
635 return -EPERM;
637 sock->ops = &l2cap_sock_ops;
639 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
640 if (!sk)
641 return -ENOMEM;
643 l2cap_sock_init(sk, NULL);
644 return 0;
647 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
649 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
650 struct sock *sk = sock->sk;
651 int err = 0;
653 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
655 if (!addr || addr->sa_family != AF_BLUETOOTH)
656 return -EINVAL;
658 lock_sock(sk);
660 if (sk->sk_state != BT_OPEN) {
661 err = -EBADFD;
662 goto done;
665 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
666 !capable(CAP_NET_BIND_SERVICE)) {
667 err = -EACCES;
668 goto done;
671 write_lock_bh(&l2cap_sk_list.lock);
673 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
674 err = -EADDRINUSE;
675 } else {
676 /* Save source address */
677 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
678 l2cap_pi(sk)->psm = la->l2_psm;
679 l2cap_pi(sk)->sport = la->l2_psm;
680 sk->sk_state = BT_BOUND;
683 write_unlock_bh(&l2cap_sk_list.lock);
685 done:
686 release_sock(sk);
687 return err;
690 static int l2cap_do_connect(struct sock *sk)
692 bdaddr_t *src = &bt_sk(sk)->src;
693 bdaddr_t *dst = &bt_sk(sk)->dst;
694 struct l2cap_conn *conn;
695 struct hci_conn *hcon;
696 struct hci_dev *hdev;
697 int err = 0;
699 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
701 if (!(hdev = hci_get_route(dst, src)))
702 return -EHOSTUNREACH;
704 hci_dev_lock_bh(hdev);
706 err = -ENOMEM;
708 hcon = hci_connect(hdev, ACL_LINK, dst);
709 if (!hcon)
710 goto done;
712 conn = l2cap_conn_add(hcon, 0);
713 if (!conn) {
714 hci_conn_put(hcon);
715 goto done;
718 err = 0;
720 /* Update source addr of the socket */
721 bacpy(src, conn->src);
723 l2cap_chan_add(conn, sk, NULL);
725 sk->sk_state = BT_CONNECT;
726 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
728 if (hcon->state == BT_CONNECTED) {
729 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
730 l2cap_conn_ready(conn);
731 goto done;
734 if (sk->sk_type == SOCK_SEQPACKET) {
735 struct l2cap_conn_req req;
736 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
737 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
738 req.psm = l2cap_pi(sk)->psm;
739 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
740 L2CAP_CONN_REQ, sizeof(req), &req);
741 } else {
742 l2cap_sock_clear_timer(sk);
743 sk->sk_state = BT_CONNECTED;
747 done:
748 hci_dev_unlock_bh(hdev);
749 hci_dev_put(hdev);
750 return err;
753 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
755 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
756 struct sock *sk = sock->sk;
757 int err = 0;
759 lock_sock(sk);
761 BT_DBG("sk %p", sk);
763 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
764 err = -EINVAL;
765 goto done;
768 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
769 err = -EINVAL;
770 goto done;
773 switch (sk->sk_state) {
774 case BT_CONNECT:
775 case BT_CONNECT2:
776 case BT_CONFIG:
777 /* Already connecting */
778 goto wait;
780 case BT_CONNECTED:
781 /* Already connected */
782 goto done;
784 case BT_OPEN:
785 case BT_BOUND:
786 /* Can connect */
787 break;
789 default:
790 err = -EBADFD;
791 goto done;
794 /* Set destination address and psm */
795 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
796 l2cap_pi(sk)->psm = la->l2_psm;
798 if ((err = l2cap_do_connect(sk)))
799 goto done;
801 wait:
802 err = bt_sock_wait_state(sk, BT_CONNECTED,
803 sock_sndtimeo(sk, flags & O_NONBLOCK));
804 done:
805 release_sock(sk);
806 return err;
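/* A minimal userspace sketch of the connect path handled above, assuming
 * BlueZ's <bluetooth/bluetooth.h> and <bluetooth/l2cap.h> headers and
 * libbluetooth for str2ba(); the remote address and PSM are placeholders:
 *
 *	struct sockaddr_l2 addr;
 *	int s = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *
 *	if (connect(s, (struct sockaddr *) &addr, sizeof(addr)) < 0)
 *		perror("l2cap connect");
 */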
809 static int l2cap_sock_listen(struct socket *sock, int backlog)
811 struct sock *sk = sock->sk;
812 int err = 0;
814 BT_DBG("sk %p backlog %d", sk, backlog);
816 lock_sock(sk);
818 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
819 err = -EBADFD;
820 goto done;
823 if (!l2cap_pi(sk)->psm) {
824 bdaddr_t *src = &bt_sk(sk)->src;
825 u16 psm;
827 err = -EINVAL;
829 write_lock_bh(&l2cap_sk_list.lock);
831 for (psm = 0x1001; psm < 0x1100; psm += 2)
832 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
833 l2cap_pi(sk)->psm = htobs(psm);
834 l2cap_pi(sk)->sport = htobs(psm);
835 err = 0;
836 break;
839 write_unlock_bh(&l2cap_sk_list.lock);
841 if (err < 0)
842 goto done;
845 sk->sk_max_ack_backlog = backlog;
846 sk->sk_ack_backlog = 0;
847 sk->sk_state = BT_LISTEN;
849 done:
850 release_sock(sk);
851 return err;
854 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
856 DECLARE_WAITQUEUE(wait, current);
857 struct sock *sk = sock->sk, *nsk;
858 long timeo;
859 int err = 0;
861 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
863 if (sk->sk_state != BT_LISTEN) {
864 err = -EBADFD;
865 goto done;
868 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
870 BT_DBG("sk %p timeo %ld", sk, timeo);
872 /* Wait for an incoming connection. (wake-one). */
873 add_wait_queue_exclusive(sk->sk_sleep, &wait);
874 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
875 set_current_state(TASK_INTERRUPTIBLE);
876 if (!timeo) {
877 err = -EAGAIN;
878 break;
881 release_sock(sk);
882 timeo = schedule_timeout(timeo);
883 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
885 if (sk->sk_state != BT_LISTEN) {
886 err = -EBADFD;
887 break;
890 if (signal_pending(current)) {
891 err = sock_intr_errno(timeo);
892 break;
895 set_current_state(TASK_RUNNING);
896 remove_wait_queue(sk->sk_sleep, &wait);
898 if (err)
899 goto done;
901 newsock->state = SS_CONNECTED;
903 BT_DBG("new socket %p", nsk);
905 done:
906 release_sock(sk);
907 return err;
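/* The matching userspace server sketch for the bind/listen/accept path
 * above, again assuming the BlueZ headers; the PSM is a placeholder, and
 * binding with a zero PSM would make l2cap_sock_listen() pick a dynamic one
 * from 0x1001 upwards:
 *
 *	struct sockaddr_l2 addr, peer;
 *	socklen_t alen = sizeof(peer);
 *	int s = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *	int client;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.l2_family = AF_BLUETOOTH;
 *	bacpy(&addr.l2_bdaddr, BDADDR_ANY);
 *	addr.l2_psm = htobs(0x1003);
 *
 *	bind(s, (struct sockaddr *) &addr, sizeof(addr));
 *	listen(s, 5);
 *	client = accept(s, (struct sockaddr *) &peer, &alen);
 */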
910 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
912 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
913 struct sock *sk = sock->sk;
915 BT_DBG("sock %p, sk %p", sock, sk);
917 addr->sa_family = AF_BLUETOOTH;
918 *len = sizeof(struct sockaddr_l2);
920 if (peer)
921 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
922 else
923 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
925 la->l2_psm = l2cap_pi(sk)->psm;
926 return 0;
929 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
931 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
932 struct sk_buff *skb, **frag;
933 int err, hlen, count, sent=0;
934 struct l2cap_hdr *lh;
936 BT_DBG("sk %p len %d", sk, len);
938 /* First fragment (with L2CAP header) */
939 if (sk->sk_type == SOCK_DGRAM)
940 hlen = L2CAP_HDR_SIZE + 2;
941 else
942 hlen = L2CAP_HDR_SIZE;
944 count = min_t(unsigned int, (conn->mtu - hlen), len);
946 skb = bt_skb_send_alloc(sk, hlen + count,
947 msg->msg_flags & MSG_DONTWAIT, &err);
948 if (!skb)
949 return err;
951 /* Create L2CAP header */
952 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
953 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
954 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
956 if (sk->sk_type == SOCK_DGRAM)
957 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
959 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
960 err = -EFAULT;
961 goto fail;
964 sent += count;
965 len -= count;
967 /* Continuation fragments (no L2CAP header) */
968 frag = &skb_shinfo(skb)->frag_list;
969 while (len) {
970 count = min_t(unsigned int, conn->mtu, len);
972 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
973 if (!*frag)
974 goto fail;
976 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
977 err = -EFAULT;
978 goto fail;
981 sent += count;
982 len -= count;
984 frag = &(*frag)->next;
987 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
988 goto fail;
990 return sent;
992 fail:
993 kfree_skb(skb);
994 return err;
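/* For reference, the first fragment built above carries the 4-byte basic
 * L2CAP header (plus a 2-byte PSM for SOCK_DGRAM), all little-endian, and
 * the length field counts the payload only.  A 5-byte connection-oriented
 * write towards DCID 0x0040 therefore goes out as:
 *
 *	05 00		length
 *	40 00		destination CID
 *	xx xx xx xx xx	payload
 *
 * Anything beyond conn->mtu is queued as frag_list continuation fragments
 * without a further L2CAP header.
 */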
997 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
999 struct sock *sk = sock->sk;
1000 int err = 0;
1002 BT_DBG("sock %p, sk %p", sock, sk);
1004 err = sock_error(sk);
1005 if (err)
1006 return err;
1008 if (msg->msg_flags & MSG_OOB)
1009 return -EOPNOTSUPP;
1011 /* Check outgoing MTU */
1012 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1013 return -EINVAL;
1015 lock_sock(sk);
1017 if (sk->sk_state == BT_CONNECTED)
1018 err = l2cap_do_send(sk, msg, len);
1019 else
1020 err = -ENOTCONN;
1022 release_sock(sk);
1023 return err;
1026 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1028 struct sock *sk = sock->sk;
1029 struct l2cap_options opts;
1030 int err = 0, len;
1031 u32 opt;
1033 BT_DBG("sk %p", sk);
1035 lock_sock(sk);
1037 switch (optname) {
1038 case L2CAP_OPTIONS:
1039 opts.imtu = l2cap_pi(sk)->imtu;
1040 opts.omtu = l2cap_pi(sk)->omtu;
1041 opts.flush_to = l2cap_pi(sk)->flush_to;
1042 opts.mode = L2CAP_MODE_BASIC;
1044 len = min_t(unsigned int, sizeof(opts), optlen);
1045 if (copy_from_user((char *) &opts, optval, len)) {
1046 err = -EFAULT;
1047 break;
1050 l2cap_pi(sk)->imtu = opts.imtu;
1051 l2cap_pi(sk)->omtu = opts.omtu;
1052 break;
1054 case L2CAP_LM:
1055 if (get_user(opt, (u32 __user *) optval)) {
1056 err = -EFAULT;
1057 break;
1060 l2cap_pi(sk)->link_mode = opt;
1061 break;
1063 default:
1064 err = -ENOPROTOOPT;
1065 break;
1068 release_sock(sk);
1069 return err;
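/* A userspace sketch of driving the L2CAP_OPTIONS case above, assuming
 * BlueZ's SOL_L2CAP definition; the read-modify-write keeps flush_to and
 * mode intact even though only the MTUs are applied here:
 *
 *	struct l2cap_options opts;
 *	socklen_t optlen = sizeof(opts);
 *
 *	getsockopt(s, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen);
 *	opts.imtu = 672;
 *	opts.omtu = 672;
 *	setsockopt(s, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts));
 */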
1072 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1074 struct sock *sk = sock->sk;
1075 struct l2cap_options opts;
1076 struct l2cap_conninfo cinfo;
1077 int len, err = 0;
1079 BT_DBG("sk %p", sk);
1081 if (get_user(len, optlen))
1082 return -EFAULT;
1084 lock_sock(sk);
1086 switch (optname) {
1087 case L2CAP_OPTIONS:
1088 opts.imtu = l2cap_pi(sk)->imtu;
1089 opts.omtu = l2cap_pi(sk)->omtu;
1090 opts.flush_to = l2cap_pi(sk)->flush_to;
1091 opts.mode = L2CAP_MODE_BASIC;
1093 len = min_t(unsigned int, len, sizeof(opts));
1094 if (copy_to_user(optval, (char *) &opts, len))
1095 err = -EFAULT;
1097 break;
1099 case L2CAP_LM:
1100 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1101 err = -EFAULT;
1102 break;
1104 case L2CAP_CONNINFO:
1105 if (sk->sk_state != BT_CONNECTED) {
1106 err = -ENOTCONN;
1107 break;
1110 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1111 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1113 len = min_t(unsigned int, len, sizeof(cinfo));
1114 if (copy_to_user(optval, (char *) &cinfo, len))
1115 err = -EFAULT;
1117 break;
1119 default:
1120 err = -ENOPROTOOPT;
1121 break;
1124 release_sock(sk);
1125 return err;
1128 static int l2cap_sock_shutdown(struct socket *sock, int how)
1130 struct sock *sk = sock->sk;
1131 int err = 0;
1133 BT_DBG("sock %p, sk %p", sock, sk);
1135 if (!sk)
1136 return 0;
1138 lock_sock(sk);
1139 if (!sk->sk_shutdown) {
1140 sk->sk_shutdown = SHUTDOWN_MASK;
1141 l2cap_sock_clear_timer(sk);
1142 __l2cap_sock_close(sk, 0);
1144 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1145 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
1147 release_sock(sk);
1148 return err;
1151 static int l2cap_sock_release(struct socket *sock)
1153 struct sock *sk = sock->sk;
1154 int err;
1156 BT_DBG("sock %p, sk %p", sock, sk);
1158 if (!sk)
1159 return 0;
1161 err = l2cap_sock_shutdown(sock, 2);
1163 sock_orphan(sk);
1164 l2cap_sock_kill(sk);
1165 return err;
1168 static void l2cap_chan_ready(struct sock *sk)
1170 struct sock *parent = bt_sk(sk)->parent;
1172 BT_DBG("sk %p, parent %p", sk, parent);
1174 l2cap_pi(sk)->conf_state = 0;
1175 l2cap_sock_clear_timer(sk);
1177 if (!parent) {
1178 /* Outgoing channel.
1179 * Wake up socket sleeping on connect.
1181 sk->sk_state = BT_CONNECTED;
1182 sk->sk_state_change(sk);
1183 } else {
1184 /* Incoming channel.
1185 * Wake up socket sleeping on accept.
1187 parent->sk_data_ready(parent, 0);
1191 /* Copy frame to all raw sockets on that connection */
1192 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1194 struct l2cap_chan_list *l = &conn->chan_list;
1195 struct sk_buff *nskb;
1196 struct sock *sk;
1198 BT_DBG("conn %p", conn);
1200 read_lock(&l->lock);
1201 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1202 if (sk->sk_type != SOCK_RAW)
1203 continue;
1205 /* Don't send frame to the socket it came from */
1206 if (skb->sk == sk)
1207 continue;
1209 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1210 continue;
1212 if (sock_queue_rcv_skb(sk, nskb))
1213 kfree_skb(nskb);
1215 read_unlock(&l->lock);
1218 /* ---- L2CAP signalling commands ---- */
1219 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1220 u8 code, u8 ident, u16 dlen, void *data)
1222 struct sk_buff *skb, **frag;
1223 struct l2cap_cmd_hdr *cmd;
1224 struct l2cap_hdr *lh;
1225 int len, count;
1227 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1229 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1230 count = min_t(unsigned int, conn->mtu, len);
1232 skb = bt_skb_alloc(count, GFP_ATOMIC);
1233 if (!skb)
1234 return NULL;
1236 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1237 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1238 lh->cid = cpu_to_le16(0x0001);
1240 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1241 cmd->code = code;
1242 cmd->ident = ident;
1243 cmd->len = cpu_to_le16(dlen);
1245 if (dlen) {
1246 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1247 memcpy(skb_put(skb, count), data, count);
1248 data += count;
1251 len -= skb->len;
1253 /* Continuation fragments (no L2CAP header) */
1254 frag = &skb_shinfo(skb)->frag_list;
1255 while (len) {
1256 count = min_t(unsigned int, conn->mtu, len);
1258 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1259 if (!*frag)
1260 goto fail;
1262 memcpy(skb_put(*frag, count), data, count);
1264 len -= count;
1265 data += count;
1267 frag = &(*frag)->next;
1270 return skb;
1272 fail:
1273 kfree_skb(skb);
1274 return NULL;
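/* For reference, l2cap_build_cmd() above always emits on the signalling
 * channel, so a command with a 4-byte payload is laid out as:
 *
 *	08 00		L2CAP length (command header + data)
 *	01 00		CID 0x0001 (signalling channel)
 *	cc		command code
 *	ii		ident, matched against the response by the handlers below
 *	04 00		command data length
 *	.. .. .. ..	command-specific data (e.g. scid + psm for a connect request)
 */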
1277 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1279 struct l2cap_conf_opt *opt = *ptr;
1280 int len;
1282 len = L2CAP_CONF_OPT_SIZE + opt->len;
1283 *ptr += len;
1285 *type = opt->type;
1286 *olen = opt->len;
1288 switch (opt->len) {
1289 case 1:
1290 *val = *((u8 *) opt->val);
1291 break;
1293 case 2:
1294 *val = __le16_to_cpu(*((__le16 *) opt->val));
1295 break;
1297 case 4:
1298 *val = __le32_to_cpu(*((__le32 *) opt->val));
1299 break;
1301 default:
1302 *val = (unsigned long) opt->val;
1303 break;
1306 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1307 return len;
1310 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1312 struct l2cap_conf_opt *opt = *ptr;
1314 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1316 opt->type = type;
1317 opt->len = len;
1319 switch (len) {
1320 case 1:
1321 *((u8 *) opt->val) = val;
1322 break;
1324 case 2:
1325 *((__le16 *) opt->val) = cpu_to_le16(val);
1326 break;
1328 case 4:
1329 *((__le32 *) opt->val) = cpu_to_le32(val);
1330 break;
1332 default:
1333 memcpy(opt->val, (void *) val, len);
1334 break;
1337 *ptr += L2CAP_CONF_OPT_SIZE + len;
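/* The two helpers above handle config options in type/length/value form,
 * where L2CAP_CONF_OPT_SIZE covers the 2-byte type + length prefix.  An MTU
 * option such as l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, 672) is
 * encoded as:
 *
 *	01		type (L2CAP_CONF_MTU)
 *	02		length
 *	a0 02		value, little-endian (672)
 *
 * Types with the high bit (0x80) set are hints and are never rejected as
 * unknown by l2cap_parse_conf_req().
 */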
1340 static int l2cap_build_conf_req(struct sock *sk, void *data)
1342 struct l2cap_pinfo *pi = l2cap_pi(sk);
1343 struct l2cap_conf_req *req = data;
1344 void *ptr = req->data;
1346 BT_DBG("sk %p", sk);
1348 if (pi->imtu != L2CAP_DEFAULT_MTU)
1349 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1351 /* FIXME: Need actual value of the flush timeout */
1352 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1353 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1355 req->dcid = cpu_to_le16(pi->dcid);
1356 req->flags = cpu_to_le16(0);
1358 return ptr - data;
1361 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1363 struct l2cap_pinfo *pi = l2cap_pi(sk);
1364 struct l2cap_conf_rsp *rsp = data;
1365 void *ptr = rsp->data;
1366 void *req = pi->conf_req;
1367 int len = pi->conf_len;
1368 int type, hint, olen;
1369 unsigned long val;
1370 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1371 u16 mtu = L2CAP_DEFAULT_MTU;
1372 u16 result = L2CAP_CONF_SUCCESS;
1374 BT_DBG("sk %p", sk);
1376 while (len >= L2CAP_CONF_OPT_SIZE) {
1377 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1379 hint = type & 0x80;
1380 type &= 0x7f;
1382 switch (type) {
1383 case L2CAP_CONF_MTU:
1384 mtu = val;
1385 break;
1387 case L2CAP_CONF_FLUSH_TO:
1388 pi->flush_to = val;
1389 break;
1391 case L2CAP_CONF_QOS:
1392 break;
1394 case L2CAP_CONF_RFC:
1395 if (olen == sizeof(rfc))
1396 memcpy(&rfc, (void *) val, olen);
1397 break;
1399 default:
1400 if (hint)
1401 break;
1403 result = L2CAP_CONF_UNKNOWN;
1404 *((u8 *) ptr++) = type;
1405 break;
1409 if (result == L2CAP_CONF_SUCCESS) {
1410 /* Configure output options and let the other side know
1411 * which ones we don't like. */
1413 if (rfc.mode == L2CAP_MODE_BASIC) {
1414 if (mtu < pi->omtu)
1415 result = L2CAP_CONF_UNACCEPT;
1416 else {
1417 pi->omtu = mtu;
1418 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1421 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1422 } else {
1423 result = L2CAP_CONF_UNACCEPT;
1425 memset(&rfc, 0, sizeof(rfc));
1426 rfc.mode = L2CAP_MODE_BASIC;
1428 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1429 sizeof(rfc), (unsigned long) &rfc);
1433 rsp->scid = cpu_to_le16(pi->dcid);
1434 rsp->result = cpu_to_le16(result);
1435 rsp->flags = cpu_to_le16(0x0000);
1437 return ptr - data;
1440 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1442 struct l2cap_conf_rsp *rsp = data;
1443 void *ptr = rsp->data;
1445 BT_DBG("sk %p", sk);
1447 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1448 rsp->result = cpu_to_le16(result);
1449 rsp->flags = cpu_to_le16(flags);
1451 return ptr - data;
1454 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1456 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1458 if (rej->reason != 0x0000)
1459 return 0;
1461 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1462 cmd->ident == conn->info_ident) {
1463 conn->info_ident = 0;
1464 del_timer(&conn->info_timer);
1465 l2cap_conn_start(conn);
1468 return 0;
1471 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1473 struct l2cap_chan_list *list = &conn->chan_list;
1474 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1475 struct l2cap_conn_rsp rsp;
1476 struct sock *sk, *parent;
1477 int result = 0, status = 0;
1479 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1480 __le16 psm = req->psm;
1482 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1484 /* Check if we have socket listening on psm */
1485 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1486 if (!parent) {
1487 result = L2CAP_CR_BAD_PSM;
1488 goto sendresp;
1491 result = L2CAP_CR_NO_MEM;
1493 /* Check for backlog size */
1494 if (sk_acceptq_is_full(parent)) {
1495 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1496 goto response;
1499 sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1500 if (!sk)
1501 goto response;
1503 write_lock_bh(&list->lock);
1505 /* Check if we already have channel with that dcid */
1506 if (__l2cap_get_chan_by_dcid(list, scid)) {
1507 write_unlock_bh(&list->lock);
1508 sock_set_flag(sk, SOCK_ZAPPED);
1509 l2cap_sock_kill(sk);
1510 goto response;
1513 hci_conn_hold(conn->hcon);
1515 l2cap_sock_init(sk, parent);
1516 bacpy(&bt_sk(sk)->src, conn->src);
1517 bacpy(&bt_sk(sk)->dst, conn->dst);
1518 l2cap_pi(sk)->psm = psm;
1519 l2cap_pi(sk)->dcid = scid;
1521 __l2cap_chan_add(conn, sk, parent);
1522 dcid = l2cap_pi(sk)->scid;
1524 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1526 /* Service level security */
1527 result = L2CAP_CR_PEND;
1528 status = L2CAP_CS_AUTHEN_PEND;
1529 sk->sk_state = BT_CONNECT2;
1530 l2cap_pi(sk)->ident = cmd->ident;
1532 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1533 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1534 if (!hci_conn_encrypt(conn->hcon))
1535 goto done;
1536 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1537 if (!hci_conn_auth(conn->hcon))
1538 goto done;
1541 sk->sk_state = BT_CONFIG;
1542 result = status = 0;
1544 done:
1545 write_unlock_bh(&list->lock);
1547 response:
1548 bh_unlock_sock(parent);
1550 sendresp:
1551 rsp.scid = cpu_to_le16(scid);
1552 rsp.dcid = cpu_to_le16(dcid);
1553 rsp.result = cpu_to_le16(result);
1554 rsp.status = cpu_to_le16(status);
1555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1556 return 0;
1559 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1561 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1562 u16 scid, dcid, result, status;
1563 struct sock *sk;
1564 u8 req[128];
1566 scid = __le16_to_cpu(rsp->scid);
1567 dcid = __le16_to_cpu(rsp->dcid);
1568 result = __le16_to_cpu(rsp->result);
1569 status = __le16_to_cpu(rsp->status);
1571 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1573 if (scid) {
1574 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1575 return 0;
1576 } else {
1577 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1578 return 0;
1581 switch (result) {
1582 case L2CAP_CR_SUCCESS:
1583 sk->sk_state = BT_CONFIG;
1584 l2cap_pi(sk)->ident = 0;
1585 l2cap_pi(sk)->dcid = dcid;
1586 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1588 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1589 l2cap_build_conf_req(sk, req), req);
1590 break;
1592 case L2CAP_CR_PEND:
1593 break;
1595 default:
1596 l2cap_chan_del(sk, ECONNREFUSED);
1597 break;
1600 bh_unlock_sock(sk);
1601 return 0;
1604 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1606 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1607 u16 dcid, flags;
1608 u8 rsp[64];
1609 struct sock *sk;
1610 int len;
1612 dcid = __le16_to_cpu(req->dcid);
1613 flags = __le16_to_cpu(req->flags);
1615 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1617 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1618 return -ENOENT;
1620 if (sk->sk_state == BT_DISCONN)
1621 goto unlock;
1623 /* Reject if config buffer is too small. */
1624 len = cmd_len - sizeof(*req);
1625 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1626 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1627 l2cap_build_conf_rsp(sk, rsp,
1628 L2CAP_CONF_REJECT, flags), rsp);
1629 goto unlock;
1632 /* Store config. */
1633 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1634 l2cap_pi(sk)->conf_len += len;
1636 if (flags & 0x0001) {
1637 /* Incomplete config. Send empty response. */
1638 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1639 l2cap_build_conf_rsp(sk, rsp,
1640 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1641 goto unlock;
1644 /* Complete config. */
1645 len = l2cap_parse_conf_req(sk, rsp);
1646 if (len < 0)
1647 goto unlock;
1649 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1651 /* Reset config buffer. */
1652 l2cap_pi(sk)->conf_len = 0;
1654 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1655 goto unlock;
1657 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1658 sk->sk_state = BT_CONNECTED;
1659 l2cap_chan_ready(sk);
1660 goto unlock;
1663 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1664 u8 req[64];
1665 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1666 l2cap_build_conf_req(sk, req), req);
1669 unlock:
1670 bh_unlock_sock(sk);
1671 return 0;
1674 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1676 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1677 u16 scid, flags, result;
1678 struct sock *sk;
1680 scid = __le16_to_cpu(rsp->scid);
1681 flags = __le16_to_cpu(rsp->flags);
1682 result = __le16_to_cpu(rsp->result);
1684 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1686 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1687 return 0;
1689 switch (result) {
1690 case L2CAP_CONF_SUCCESS:
1691 break;
1693 case L2CAP_CONF_UNACCEPT:
1694 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1695 char req[128];
1696 /* It does not make sense to adjust L2CAP parameters
1697 * that are currently defined in the spec. We simply
1698 * resend config request that we sent earlier. It is
1699 * stupid, but it helps qualification testing which
1700 * expects at least some response from us. */
1701 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1702 l2cap_build_conf_req(sk, req), req);
1703 goto done;
1706 default:
1707 sk->sk_state = BT_DISCONN;
1708 sk->sk_err = ECONNRESET;
1709 l2cap_sock_set_timer(sk, HZ * 5);
1711 struct l2cap_disconn_req req;
1712 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1713 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1714 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1715 L2CAP_DISCONN_REQ, sizeof(req), &req);
1717 goto done;
1720 if (flags & 0x01)
1721 goto done;
1723 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1725 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1726 sk->sk_state = BT_CONNECTED;
1727 l2cap_chan_ready(sk);
1730 done:
1731 bh_unlock_sock(sk);
1732 return 0;
1735 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1737 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1738 struct l2cap_disconn_rsp rsp;
1739 u16 dcid, scid;
1740 struct sock *sk;
1742 scid = __le16_to_cpu(req->scid);
1743 dcid = __le16_to_cpu(req->dcid);
1745 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1747 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1748 return 0;
1750 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1751 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1752 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1754 sk->sk_shutdown = SHUTDOWN_MASK;
1756 l2cap_chan_del(sk, ECONNRESET);
1757 bh_unlock_sock(sk);
1759 l2cap_sock_kill(sk);
1760 return 0;
1763 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1765 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1766 u16 dcid, scid;
1767 struct sock *sk;
1769 scid = __le16_to_cpu(rsp->scid);
1770 dcid = __le16_to_cpu(rsp->dcid);
1772 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1774 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1775 return 0;
1777 l2cap_chan_del(sk, 0);
1778 bh_unlock_sock(sk);
1780 l2cap_sock_kill(sk);
1781 return 0;
1784 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1786 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1787 u16 type;
1789 type = __le16_to_cpu(req->type);
1791 BT_DBG("type 0x%4.4x", type);
1793 if (type == L2CAP_IT_FEAT_MASK) {
1794 u8 buf[8];
1795 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1796 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1797 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1798 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1799 l2cap_send_cmd(conn, cmd->ident,
1800 L2CAP_INFO_RSP, sizeof(buf), buf);
1801 } else {
1802 struct l2cap_info_rsp rsp;
1803 rsp.type = cpu_to_le16(type);
1804 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1805 l2cap_send_cmd(conn, cmd->ident,
1806 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1809 return 0;
1812 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1814 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1815 u16 type, result;
1817 type = __le16_to_cpu(rsp->type);
1818 result = __le16_to_cpu(rsp->result);
1820 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1822 conn->info_ident = 0;
1824 del_timer(&conn->info_timer);
1826 if (type == L2CAP_IT_FEAT_MASK)
1827 conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));
1829 l2cap_conn_start(conn);
1831 return 0;
1834 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1836 u8 *data = skb->data;
1837 int len = skb->len;
1838 struct l2cap_cmd_hdr cmd;
1839 int err = 0;
1841 l2cap_raw_recv(conn, skb);
1843 while (len >= L2CAP_CMD_HDR_SIZE) {
1844 u16 cmd_len;
1845 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1846 data += L2CAP_CMD_HDR_SIZE;
1847 len -= L2CAP_CMD_HDR_SIZE;
1849 cmd_len = le16_to_cpu(cmd.len);
1851 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1853 if (cmd_len > len || !cmd.ident) {
1854 BT_DBG("corrupted command");
1855 break;
1858 switch (cmd.code) {
1859 case L2CAP_COMMAND_REJ:
1860 l2cap_command_rej(conn, &cmd, data);
1861 break;
1863 case L2CAP_CONN_REQ:
1864 err = l2cap_connect_req(conn, &cmd, data);
1865 break;
1867 case L2CAP_CONN_RSP:
1868 err = l2cap_connect_rsp(conn, &cmd, data);
1869 break;
1871 case L2CAP_CONF_REQ:
1872 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1873 break;
1875 case L2CAP_CONF_RSP:
1876 err = l2cap_config_rsp(conn, &cmd, data);
1877 break;
1879 case L2CAP_DISCONN_REQ:
1880 err = l2cap_disconnect_req(conn, &cmd, data);
1881 break;
1883 case L2CAP_DISCONN_RSP:
1884 err = l2cap_disconnect_rsp(conn, &cmd, data);
1885 break;
1887 case L2CAP_ECHO_REQ:
1888 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1889 break;
1891 case L2CAP_ECHO_RSP:
1892 break;
1894 case L2CAP_INFO_REQ:
1895 err = l2cap_information_req(conn, &cmd, data);
1896 break;
1898 case L2CAP_INFO_RSP:
1899 err = l2cap_information_rsp(conn, &cmd, data);
1900 break;
1902 default:
1903 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1904 err = -EINVAL;
1905 break;
1908 if (err) {
1909 struct l2cap_cmd_rej rej;
1910 BT_DBG("error %d", err);
1912 /* FIXME: Map err to a valid reason */
1913 rej.reason = cpu_to_le16(0);
1914 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
1917 data += cmd_len;
1918 len -= cmd_len;
1921 kfree_skb(skb);
1924 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1926 struct sock *sk;
1928 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1929 if (!sk) {
1930 BT_DBG("unknown cid 0x%4.4x", cid);
1931 goto drop;
1934 BT_DBG("sk %p, len %d", sk, skb->len);
1936 if (sk->sk_state != BT_CONNECTED)
1937 goto drop;
1939 if (l2cap_pi(sk)->imtu < skb->len)
1940 goto drop;
1942 /* If socket recv buffers overflows we drop data here
1943 * which is *bad* because L2CAP has to be reliable.
1944 * But we don't have any other choice. L2CAP doesn't
1945 * provide flow control mechanism. */
1947 if (!sock_queue_rcv_skb(sk, skb))
1948 goto done;
1950 drop:
1951 kfree_skb(skb);
1953 done:
1954 if (sk)
1955 bh_unlock_sock(sk);
1957 return 0;
1960 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
1962 struct sock *sk;
1964 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1965 if (!sk)
1966 goto drop;
1968 BT_DBG("sk %p, len %d", sk, skb->len);
1970 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1971 goto drop;
1973 if (l2cap_pi(sk)->imtu < skb->len)
1974 goto drop;
1976 if (!sock_queue_rcv_skb(sk, skb))
1977 goto done;
1979 drop:
1980 kfree_skb(skb);
1982 done:
1983 if (sk) bh_unlock_sock(sk);
1984 return 0;
1987 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1989 struct l2cap_hdr *lh = (void *) skb->data;
1990 u16 cid, len;
1991 __le16 psm;
1993 skb_pull(skb, L2CAP_HDR_SIZE);
1994 cid = __le16_to_cpu(lh->cid);
1995 len = __le16_to_cpu(lh->len);
1997 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1999 switch (cid) {
2000 case 0x0001:
2001 l2cap_sig_channel(conn, skb);
2002 break;
2004 case 0x0002:
2005 psm = get_unaligned((__le16 *) skb->data);
2006 skb_pull(skb, 2);
2007 l2cap_conless_channel(conn, psm, skb);
2008 break;
2010 default:
2011 l2cap_data_channel(conn, cid, skb);
2012 break;
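/* A worked example of the demultiplexing above: an incoming frame of
 *
 *	04 00 02 00 03 10 xx xx
 *
 * parses as length 0x0004 and CID 0x0002, so the connectionless branch pulls
 * the next two bytes off as the PSM (0x1003, little-endian) and hands the
 * rest to l2cap_conless_channel(); CID 0x0001 goes to the signalling handler
 * and any other CID to l2cap_data_channel().
 */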
2016 /* ---- L2CAP interface with lower layer (HCI) ---- */
2018 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2020 int exact = 0, lm1 = 0, lm2 = 0;
2021 register struct sock *sk;
2022 struct hlist_node *node;
2024 if (type != ACL_LINK)
2025 return 0;
2027 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2029 /* Find listening sockets and check their link_mode */
2030 read_lock(&l2cap_sk_list.lock);
2031 sk_for_each(sk, node, &l2cap_sk_list.head) {
2032 if (sk->sk_state != BT_LISTEN)
2033 continue;
2035 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2036 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2037 exact++;
2038 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2039 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2041 read_unlock(&l2cap_sk_list.lock);
2043 return exact ? lm1 : lm2;
2046 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2048 struct l2cap_conn *conn;
2050 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2052 if (hcon->type != ACL_LINK)
2053 return 0;
2055 if (!status) {
2056 conn = l2cap_conn_add(hcon, status);
2057 if (conn)
2058 l2cap_conn_ready(conn);
2059 } else
2060 l2cap_conn_del(hcon, bt_err(status));
2062 return 0;
2065 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2067 BT_DBG("hcon %p reason %d", hcon, reason);
2069 if (hcon->type != ACL_LINK)
2070 return 0;
2072 l2cap_conn_del(hcon, bt_err(reason));
2074 return 0;
2077 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2079 struct l2cap_chan_list *l;
2080 struct l2cap_conn *conn = hcon->l2cap_data;
2081 struct l2cap_conn_rsp rsp;
2082 struct sock *sk;
2083 int result;
2085 if (!conn)
2086 return 0;
2088 l = &conn->chan_list;
2090 BT_DBG("conn %p", conn);
2092 read_lock(&l->lock);
2094 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2095 bh_lock_sock(sk);
2097 if (sk->sk_state != BT_CONNECT2 ||
2098 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
2099 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
2100 bh_unlock_sock(sk);
2101 continue;
2104 if (!status) {
2105 sk->sk_state = BT_CONFIG;
2106 result = 0;
2107 } else {
2108 sk->sk_state = BT_DISCONN;
2109 l2cap_sock_set_timer(sk, HZ/10);
2110 result = L2CAP_CR_SEC_BLOCK;
2113 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2114 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2115 rsp.result = cpu_to_le16(result);
2116 rsp.status = cpu_to_le16(0);
2117 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2118 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2120 bh_unlock_sock(sk);
2123 read_unlock(&l->lock);
2124 return 0;
2127 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2129 struct l2cap_chan_list *l;
2130 struct l2cap_conn *conn = hcon->l2cap_data;
2131 struct l2cap_conn_rsp rsp;
2132 struct sock *sk;
2133 int result;
2135 if (!conn)
2136 return 0;
2138 l = &conn->chan_list;
2140 BT_DBG("conn %p", conn);
2142 read_lock(&l->lock);
2144 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2145 bh_lock_sock(sk);
2147 if (sk->sk_state != BT_CONNECT2) {
2148 bh_unlock_sock(sk);
2149 continue;
2152 if (!status) {
2153 sk->sk_state = BT_CONFIG;
2154 result = 0;
2155 } else {
2156 sk->sk_state = BT_DISCONN;
2157 l2cap_sock_set_timer(sk, HZ/10);
2158 result = L2CAP_CR_SEC_BLOCK;
2161 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2162 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2163 rsp.result = cpu_to_le16(result);
2164 rsp.status = cpu_to_le16(0);
2165 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2166 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2168 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2169 hci_conn_change_link_key(hcon);
2171 bh_unlock_sock(sk);
2174 read_unlock(&l->lock);
2175 return 0;
2178 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2180 struct l2cap_conn *conn = hcon->l2cap_data;
2182 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2183 goto drop;
2185 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2187 if (flags & ACL_START) {
2188 struct l2cap_hdr *hdr;
2189 int len;
2191 if (conn->rx_len) {
2192 BT_ERR("Unexpected start frame (len %d)", skb->len);
2193 kfree_skb(conn->rx_skb);
2194 conn->rx_skb = NULL;
2195 conn->rx_len = 0;
2196 l2cap_conn_unreliable(conn, ECOMM);
2199 if (skb->len < 2) {
2200 BT_ERR("Frame is too short (len %d)", skb->len);
2201 l2cap_conn_unreliable(conn, ECOMM);
2202 goto drop;
2205 hdr = (struct l2cap_hdr *) skb->data;
2206 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2208 if (len == skb->len) {
2209 /* Complete frame received */
2210 l2cap_recv_frame(conn, skb);
2211 return 0;
2214 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2216 if (skb->len > len) {
2217 BT_ERR("Frame is too long (len %d, expected len %d)",
2218 skb->len, len);
2219 l2cap_conn_unreliable(conn, ECOMM);
2220 goto drop;
2223 /* Allocate skb for the complete frame (with header) */
2224 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2225 goto drop;
2227 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2228 skb->len);
2229 conn->rx_len = len - skb->len;
2230 } else {
2231 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2233 if (!conn->rx_len) {
2234 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2235 l2cap_conn_unreliable(conn, ECOMM);
2236 goto drop;
2239 if (skb->len > conn->rx_len) {
2240 BT_ERR("Fragment is too long (len %d, expected %d)",
2241 skb->len, conn->rx_len);
2242 kfree_skb(conn->rx_skb);
2243 conn->rx_skb = NULL;
2244 conn->rx_len = 0;
2245 l2cap_conn_unreliable(conn, ECOMM);
2246 goto drop;
2249 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2250 skb->len);
2251 conn->rx_len -= skb->len;
2253 if (!conn->rx_len) {
2254 /* Complete frame received */
2255 l2cap_recv_frame(conn, conn->rx_skb);
2256 conn->rx_skb = NULL;
2260 drop:
2261 kfree_skb(skb);
2262 return 0;
2265 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2267 struct sock *sk;
2268 struct hlist_node *node;
2269 char *str = buf;
2271 read_lock_bh(&l2cap_sk_list.lock);
2273 sk_for_each(sk, node, &l2cap_sk_list.head) {
2274 struct l2cap_pinfo *pi = l2cap_pi(sk);
2276 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2277 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2278 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2279 pi->imtu, pi->omtu, pi->link_mode);
2282 read_unlock_bh(&l2cap_sk_list.lock);
2284 return (str - buf);
2287 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
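/* The l2cap class attribute above is registered from l2cap_init() and shows
 * up as a read-only sysfs file (typically /sys/class/bluetooth/l2cap,
 * assuming bt_class is the "bluetooth" class).  Each line follows the
 * sprintf() format in l2cap_sysfs_show(), with illustrative values:
 *
 *	# cat /sys/class/bluetooth/l2cap
 *	00:11:22:33:44:55 66:77:88:99:AA:BB 1 4097 0x0040 0x0040 672 672 0x0
 *	  (src dst state psm scid dcid imtu omtu link_mode)
 */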
2289 static const struct proto_ops l2cap_sock_ops = {
2290 .family = PF_BLUETOOTH,
2291 .owner = THIS_MODULE,
2292 .release = l2cap_sock_release,
2293 .bind = l2cap_sock_bind,
2294 .connect = l2cap_sock_connect,
2295 .listen = l2cap_sock_listen,
2296 .accept = l2cap_sock_accept,
2297 .getname = l2cap_sock_getname,
2298 .sendmsg = l2cap_sock_sendmsg,
2299 .recvmsg = bt_sock_recvmsg,
2300 .poll = bt_sock_poll,
2301 .mmap = sock_no_mmap,
2302 .socketpair = sock_no_socketpair,
2303 .ioctl = sock_no_ioctl,
2304 .shutdown = l2cap_sock_shutdown,
2305 .setsockopt = l2cap_sock_setsockopt,
2306 .getsockopt = l2cap_sock_getsockopt
2309 static struct net_proto_family l2cap_sock_family_ops = {
2310 .family = PF_BLUETOOTH,
2311 .owner = THIS_MODULE,
2312 .create = l2cap_sock_create,
2315 static struct hci_proto l2cap_hci_proto = {
2316 .name = "L2CAP",
2317 .id = HCI_PROTO_L2CAP,
2318 .connect_ind = l2cap_connect_ind,
2319 .connect_cfm = l2cap_connect_cfm,
2320 .disconn_ind = l2cap_disconn_ind,
2321 .auth_cfm = l2cap_auth_cfm,
2322 .encrypt_cfm = l2cap_encrypt_cfm,
2323 .recv_acldata = l2cap_recv_acldata
2326 static int __init l2cap_init(void)
2328 int err;
2330 err = proto_register(&l2cap_proto, 0);
2331 if (err < 0)
2332 return err;
2334 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2335 if (err < 0) {
2336 BT_ERR("L2CAP socket registration failed");
2337 goto error;
2340 err = hci_register_proto(&l2cap_hci_proto);
2341 if (err < 0) {
2342 BT_ERR("L2CAP protocol registration failed");
2343 bt_sock_unregister(BTPROTO_L2CAP);
2344 goto error;
2347 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2348 BT_ERR("Failed to create L2CAP info file");
2350 BT_INFO("L2CAP ver %s", VERSION);
2351 BT_INFO("L2CAP socket layer initialized");
2353 return 0;
2355 error:
2356 proto_unregister(&l2cap_proto);
2357 return err;
2360 static void __exit l2cap_exit(void)
2362 class_remove_file(bt_class, &class_attr_l2cap);
2364 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2365 BT_ERR("L2CAP socket unregistration failed");
2367 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2368 BT_ERR("L2CAP protocol unregistration failed");
2370 proto_unregister(&l2cap_proto);
2373 void l2cap_load(void)
2375 /* Dummy function to trigger automatic L2CAP module loading by
2376 * other modules that use L2CAP sockets but don't use any other
2377 * symbols from it. */
2378 return;
2380 EXPORT_SYMBOL(l2cap_load);
2382 module_init(l2cap_init);
2383 module_exit(l2cap_exit);
2385 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2386 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2387 MODULE_VERSION(VERSION);
2388 MODULE_LICENSE("GPL");
2389 MODULE_ALIAS("bt-proto-0");