net: sk_sleep() helper
[linux/fpc-iii.git] / net / bluetooth / l2cap.c
blobc1e60eed5a97a785f2a4547cb0684d8b58dd35cf
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */

/* Socket timer expiry (runs in softirq context): pick an error code
 * based on the state we timed out in, tear the channel down, and drop
 * the reference the timer held. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
109 static void l2cap_sock_clear_timer(struct sock *sk)
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
115 /* ---- L2CAP channels ---- */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
118 struct sock *s;
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
121 break;
123 return s;
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
128 struct sock *s;
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
131 break;
133 return s;
136 /* Find channel with given SCID.
137 * Returns locked socket */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
140 struct sock *s;
141 read_lock(&l->lock);
142 s = __l2cap_get_chan_by_scid(l, cid);
143 if (s)
144 bh_lock_sock(s);
145 read_unlock(&l->lock);
146 return s;
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
151 struct sock *s;
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
154 break;
156 return s;
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
161 struct sock *s;
162 read_lock(&l->lock);
163 s = __l2cap_get_chan_by_ident(l, ident);
164 if (s)
165 bh_lock_sock(s);
166 read_unlock(&l->lock);
167 return s;
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
176 return cid;
179 return 0;
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
184 sock_hold(sk);
186 if (l->head)
187 l2cap_pi(l->head)->prev_c = sk;
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
191 l->head = sk;
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
198 write_lock_bh(&l->lock);
199 if (sk == l->head)
200 l->head = next;
202 if (next)
203 l2cap_pi(next)->prev_c = prev;
204 if (prev)
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
208 __sock_put(sk);
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
213 struct l2cap_chan_list *l = &conn->chan_list;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
218 conn->disc_reason = 0x13;
220 l2cap_pi(sk)->conn = conn;
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
230 } else {
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 __l2cap_chan_link(l, sk);
239 if (parent)
240 bt_accept_enqueue(parent, sk);
243 /* Delete channel.
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
250 l2cap_sock_clear_timer(sk);
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
254 if (conn) {
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
258 hci_conn_put(conn->hcon);
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
264 if (err)
265 sk->sk_err = err;
267 if (parent) {
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
270 } else
271 sk->sk_state_change(sk);
274 /* Service level security */
275 static inline int l2cap_check_security(struct sock *sk)
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
278 __u8 auth_type;
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
283 else
284 auth_type = HCI_AT_NO_BONDING;
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
288 } else {
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
292 break;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
295 break;
296 default:
297 auth_type = HCI_AT_NO_BONDING;
298 break;
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
303 auth_type);
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
308 u8 id;
310 /* Get next available identificator.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn->lock);
318 if (++conn->tx_ident > 128)
319 conn->tx_ident = 1;
321 id = conn->tx_ident;
323 spin_unlock_bh(&conn->lock);
325 return id;
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
332 BT_DBG("code 0x%2.2x", code);
334 if (!skb)
335 return -ENOMEM;
337 return hci_send_acl(conn->hcon, skb, 0);
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
342 struct sk_buff *skb;
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
347 if (pi->fcs == L2CAP_FCS_CRC16)
348 hlen += 2;
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
355 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 if (!skb)
357 return -ENOMEM;
359 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
360 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
361 lh->cid = cpu_to_le16(pi->dcid);
362 put_unaligned_le16(control, skb_put(skb, 2));
364 if (pi->fcs == L2CAP_FCS_CRC16) {
365 u16 fcs = crc16(0, (u8 *)lh, count - 2);
366 put_unaligned_le16(fcs, skb_put(skb, 2));
369 return hci_send_acl(pi->conn->hcon, skb, 0);
372 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
374 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
375 control |= L2CAP_SUPER_RCV_NOT_READY;
376 else
377 control |= L2CAP_SUPER_RCV_READY;
379 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
381 return l2cap_send_sframe(pi, control);
384 static void l2cap_do_start(struct sock *sk)
386 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
388 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
389 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
390 return;
392 if (l2cap_check_security(sk)) {
393 struct l2cap_conn_req req;
394 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
395 req.psm = l2cap_pi(sk)->psm;
397 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
399 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
400 L2CAP_CONN_REQ, sizeof(req), &req);
402 } else {
403 struct l2cap_info_req req;
404 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
407 conn->info_ident = l2cap_get_ident(conn);
409 mod_timer(&conn->info_timer, jiffies +
410 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
412 l2cap_send_cmd(conn, conn->info_ident,
413 L2CAP_INFO_REQ, sizeof(req), &req);
417 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
419 struct l2cap_disconn_req req;
421 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
422 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
423 l2cap_send_cmd(conn, l2cap_get_ident(conn),
424 L2CAP_DISCONN_REQ, sizeof(req), &req);
427 /* ---- L2CAP connections ---- */
428 static void l2cap_conn_start(struct l2cap_conn *conn)
430 struct l2cap_chan_list *l = &conn->chan_list;
431 struct sock *sk;
433 BT_DBG("conn %p", conn);
435 read_lock(&l->lock);
437 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
438 bh_lock_sock(sk);
440 if (sk->sk_type != SOCK_SEQPACKET) {
441 bh_unlock_sock(sk);
442 continue;
445 if (sk->sk_state == BT_CONNECT) {
446 if (l2cap_check_security(sk)) {
447 struct l2cap_conn_req req;
448 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
449 req.psm = l2cap_pi(sk)->psm;
451 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
453 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
454 L2CAP_CONN_REQ, sizeof(req), &req);
456 } else if (sk->sk_state == BT_CONNECT2) {
457 struct l2cap_conn_rsp rsp;
458 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
459 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
461 if (l2cap_check_security(sk)) {
462 if (bt_sk(sk)->defer_setup) {
463 struct sock *parent = bt_sk(sk)->parent;
464 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
465 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
466 parent->sk_data_ready(parent, 0);
468 } else {
469 sk->sk_state = BT_CONFIG;
470 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
471 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
473 } else {
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
478 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
479 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
482 bh_unlock_sock(sk);
485 read_unlock(&l->lock);
488 static void l2cap_conn_ready(struct l2cap_conn *conn)
490 struct l2cap_chan_list *l = &conn->chan_list;
491 struct sock *sk;
493 BT_DBG("conn %p", conn);
495 read_lock(&l->lock);
497 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
498 bh_lock_sock(sk);
500 if (sk->sk_type != SOCK_SEQPACKET) {
501 l2cap_sock_clear_timer(sk);
502 sk->sk_state = BT_CONNECTED;
503 sk->sk_state_change(sk);
504 } else if (sk->sk_state == BT_CONNECT)
505 l2cap_do_start(sk);
507 bh_unlock_sock(sk);
510 read_unlock(&l->lock);
513 /* Notify sockets that we cannot guaranty reliability anymore */
514 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
516 struct l2cap_chan_list *l = &conn->chan_list;
517 struct sock *sk;
519 BT_DBG("conn %p", conn);
521 read_lock(&l->lock);
523 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
524 if (l2cap_pi(sk)->force_reliable)
525 sk->sk_err = err;
528 read_unlock(&l->lock);
531 static void l2cap_info_timeout(unsigned long arg)
533 struct l2cap_conn *conn = (void *) arg;
535 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
536 conn->info_ident = 0;
538 l2cap_conn_start(conn);
541 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
543 struct l2cap_conn *conn = hcon->l2cap_data;
545 if (conn || status)
546 return conn;
548 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
549 if (!conn)
550 return NULL;
552 hcon->l2cap_data = conn;
553 conn->hcon = hcon;
555 BT_DBG("hcon %p conn %p", hcon, conn);
557 conn->mtu = hcon->hdev->acl_mtu;
558 conn->src = &hcon->hdev->bdaddr;
559 conn->dst = &hcon->dst;
561 conn->feat_mask = 0;
563 spin_lock_init(&conn->lock);
564 rwlock_init(&conn->chan_list.lock);
566 setup_timer(&conn->info_timer, l2cap_info_timeout,
567 (unsigned long) conn);
569 conn->disc_reason = 0x13;
571 return conn;
574 static void l2cap_conn_del(struct hci_conn *hcon, int err)
576 struct l2cap_conn *conn = hcon->l2cap_data;
577 struct sock *sk;
579 if (!conn)
580 return;
582 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
584 kfree_skb(conn->rx_skb);
586 /* Kill channels */
587 while ((sk = conn->chan_list.head)) {
588 bh_lock_sock(sk);
589 l2cap_chan_del(sk, err);
590 bh_unlock_sock(sk);
591 l2cap_sock_kill(sk);
594 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
595 del_timer_sync(&conn->info_timer);
597 hcon->l2cap_data = NULL;
598 kfree(conn);
601 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
603 struct l2cap_chan_list *l = &conn->chan_list;
604 write_lock_bh(&l->lock);
605 __l2cap_chan_add(conn, sk, parent);
606 write_unlock_bh(&l->lock);
609 /* ---- Socket interface ---- */
610 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
612 struct sock *sk;
613 struct hlist_node *node;
614 sk_for_each(sk, node, &l2cap_sk_list.head)
615 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
616 goto found;
617 sk = NULL;
618 found:
619 return sk;
622 /* Find socket with psm and source bdaddr.
623 * Returns closest match.
625 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
627 struct sock *sk = NULL, *sk1 = NULL;
628 struct hlist_node *node;
630 sk_for_each(sk, node, &l2cap_sk_list.head) {
631 if (state && sk->sk_state != state)
632 continue;
634 if (l2cap_pi(sk)->psm == psm) {
635 /* Exact match. */
636 if (!bacmp(&bt_sk(sk)->src, src))
637 break;
639 /* Closest match */
640 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
641 sk1 = sk;
644 return node ? sk : sk1;
647 /* Find socket with given address (psm, src).
648 * Returns locked socket */
649 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
651 struct sock *s;
652 read_lock(&l2cap_sk_list.lock);
653 s = __l2cap_get_sock_by_psm(state, psm, src);
654 if (s)
655 bh_lock_sock(s);
656 read_unlock(&l2cap_sk_list.lock);
657 return s;
660 static void l2cap_sock_destruct(struct sock *sk)
662 BT_DBG("sk %p", sk);
664 skb_queue_purge(&sk->sk_receive_queue);
665 skb_queue_purge(&sk->sk_write_queue);
668 static void l2cap_sock_cleanup_listen(struct sock *parent)
670 struct sock *sk;
672 BT_DBG("parent %p", parent);
674 /* Close not yet accepted channels */
675 while ((sk = bt_accept_dequeue(parent, NULL)))
676 l2cap_sock_close(sk);
678 parent->sk_state = BT_CLOSED;
679 sock_set_flag(parent, SOCK_ZAPPED);
682 /* Kill socket (only if zapped and orphan)
683 * Must be called on unlocked socket.
685 static void l2cap_sock_kill(struct sock *sk)
687 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
688 return;
690 BT_DBG("sk %p state %d", sk, sk->sk_state);
692 /* Kill poor orphan */
693 bt_sock_unlink(&l2cap_sk_list, sk);
694 sock_set_flag(sk, SOCK_DEAD);
695 sock_put(sk);
698 static void __l2cap_sock_close(struct sock *sk, int reason)
700 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
702 switch (sk->sk_state) {
703 case BT_LISTEN:
704 l2cap_sock_cleanup_listen(sk);
705 break;
707 case BT_CONNECTED:
708 case BT_CONFIG:
709 if (sk->sk_type == SOCK_SEQPACKET) {
710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
712 sk->sk_state = BT_DISCONN;
713 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
714 l2cap_send_disconn_req(conn, sk);
715 } else
716 l2cap_chan_del(sk, reason);
717 break;
719 case BT_CONNECT2:
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
722 struct l2cap_conn_rsp rsp;
723 __u16 result;
725 if (bt_sk(sk)->defer_setup)
726 result = L2CAP_CR_SEC_BLOCK;
727 else
728 result = L2CAP_CR_BAD_PSM;
730 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
731 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
732 rsp.result = cpu_to_le16(result);
733 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
734 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
735 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
736 } else
737 l2cap_chan_del(sk, reason);
738 break;
740 case BT_CONNECT:
741 case BT_DISCONN:
742 l2cap_chan_del(sk, reason);
743 break;
745 default:
746 sock_set_flag(sk, SOCK_ZAPPED);
747 break;
751 /* Must be called on unlocked socket. */
752 static void l2cap_sock_close(struct sock *sk)
754 l2cap_sock_clear_timer(sk);
755 lock_sock(sk);
756 __l2cap_sock_close(sk, ECONNRESET);
757 release_sock(sk);
758 l2cap_sock_kill(sk);
761 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
763 struct l2cap_pinfo *pi = l2cap_pi(sk);
765 BT_DBG("sk %p", sk);
767 if (parent) {
768 sk->sk_type = parent->sk_type;
769 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
771 pi->imtu = l2cap_pi(parent)->imtu;
772 pi->omtu = l2cap_pi(parent)->omtu;
773 pi->mode = l2cap_pi(parent)->mode;
774 pi->fcs = l2cap_pi(parent)->fcs;
775 pi->sec_level = l2cap_pi(parent)->sec_level;
776 pi->role_switch = l2cap_pi(parent)->role_switch;
777 pi->force_reliable = l2cap_pi(parent)->force_reliable;
778 } else {
779 pi->imtu = L2CAP_DEFAULT_MTU;
780 pi->omtu = 0;
781 pi->mode = L2CAP_MODE_BASIC;
782 pi->fcs = L2CAP_FCS_CRC16;
783 pi->sec_level = BT_SECURITY_LOW;
784 pi->role_switch = 0;
785 pi->force_reliable = 0;
788 /* Default config options */
789 pi->conf_len = 0;
790 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
791 skb_queue_head_init(TX_QUEUE(sk));
792 skb_queue_head_init(SREJ_QUEUE(sk));
793 INIT_LIST_HEAD(SREJ_LIST(sk));
796 static struct proto l2cap_proto = {
797 .name = "L2CAP",
798 .owner = THIS_MODULE,
799 .obj_size = sizeof(struct l2cap_pinfo)
802 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
804 struct sock *sk;
806 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
807 if (!sk)
808 return NULL;
810 sock_init_data(sock, sk);
811 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
813 sk->sk_destruct = l2cap_sock_destruct;
814 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
816 sock_reset_flag(sk, SOCK_ZAPPED);
818 sk->sk_protocol = proto;
819 sk->sk_state = BT_OPEN;
821 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
823 bt_sock_link(&l2cap_sk_list, sk);
824 return sk;
827 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
828 int kern)
830 struct sock *sk;
832 BT_DBG("sock %p", sock);
834 sock->state = SS_UNCONNECTED;
836 if (sock->type != SOCK_SEQPACKET &&
837 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
838 return -ESOCKTNOSUPPORT;
840 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
841 return -EPERM;
843 sock->ops = &l2cap_sock_ops;
845 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
846 if (!sk)
847 return -ENOMEM;
849 l2cap_sock_init(sk, NULL);
850 return 0;
853 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
855 struct sock *sk = sock->sk;
856 struct sockaddr_l2 la;
857 int len, err = 0;
859 BT_DBG("sk %p", sk);
861 if (!addr || addr->sa_family != AF_BLUETOOTH)
862 return -EINVAL;
864 memset(&la, 0, sizeof(la));
865 len = min_t(unsigned int, sizeof(la), alen);
866 memcpy(&la, addr, len);
868 if (la.l2_cid)
869 return -EINVAL;
871 lock_sock(sk);
873 if (sk->sk_state != BT_OPEN) {
874 err = -EBADFD;
875 goto done;
878 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
879 !capable(CAP_NET_BIND_SERVICE)) {
880 err = -EACCES;
881 goto done;
884 write_lock_bh(&l2cap_sk_list.lock);
886 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
887 err = -EADDRINUSE;
888 } else {
889 /* Save source address */
890 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
891 l2cap_pi(sk)->psm = la.l2_psm;
892 l2cap_pi(sk)->sport = la.l2_psm;
893 sk->sk_state = BT_BOUND;
895 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
896 __le16_to_cpu(la.l2_psm) == 0x0003)
897 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
900 write_unlock_bh(&l2cap_sk_list.lock);
902 done:
903 release_sock(sk);
904 return err;
907 static int l2cap_do_connect(struct sock *sk)
909 bdaddr_t *src = &bt_sk(sk)->src;
910 bdaddr_t *dst = &bt_sk(sk)->dst;
911 struct l2cap_conn *conn;
912 struct hci_conn *hcon;
913 struct hci_dev *hdev;
914 __u8 auth_type;
915 int err;
917 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
918 l2cap_pi(sk)->psm);
920 hdev = hci_get_route(dst, src);
921 if (!hdev)
922 return -EHOSTUNREACH;
924 hci_dev_lock_bh(hdev);
926 err = -ENOMEM;
928 if (sk->sk_type == SOCK_RAW) {
929 switch (l2cap_pi(sk)->sec_level) {
930 case BT_SECURITY_HIGH:
931 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
932 break;
933 case BT_SECURITY_MEDIUM:
934 auth_type = HCI_AT_DEDICATED_BONDING;
935 break;
936 default:
937 auth_type = HCI_AT_NO_BONDING;
938 break;
940 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
941 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
942 auth_type = HCI_AT_NO_BONDING_MITM;
943 else
944 auth_type = HCI_AT_NO_BONDING;
946 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
947 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
948 } else {
949 switch (l2cap_pi(sk)->sec_level) {
950 case BT_SECURITY_HIGH:
951 auth_type = HCI_AT_GENERAL_BONDING_MITM;
952 break;
953 case BT_SECURITY_MEDIUM:
954 auth_type = HCI_AT_GENERAL_BONDING;
955 break;
956 default:
957 auth_type = HCI_AT_NO_BONDING;
958 break;
962 hcon = hci_connect(hdev, ACL_LINK, dst,
963 l2cap_pi(sk)->sec_level, auth_type);
964 if (!hcon)
965 goto done;
967 conn = l2cap_conn_add(hcon, 0);
968 if (!conn) {
969 hci_conn_put(hcon);
970 goto done;
973 err = 0;
975 /* Update source addr of the socket */
976 bacpy(src, conn->src);
978 l2cap_chan_add(conn, sk, NULL);
980 sk->sk_state = BT_CONNECT;
981 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
983 if (hcon->state == BT_CONNECTED) {
984 if (sk->sk_type != SOCK_SEQPACKET) {
985 l2cap_sock_clear_timer(sk);
986 sk->sk_state = BT_CONNECTED;
987 } else
988 l2cap_do_start(sk);
991 done:
992 hci_dev_unlock_bh(hdev);
993 hci_dev_put(hdev);
994 return err;
997 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
999 struct sock *sk = sock->sk;
1000 struct sockaddr_l2 la;
1001 int len, err = 0;
1003 BT_DBG("sk %p", sk);
1005 if (!addr || alen < sizeof(addr->sa_family) ||
1006 addr->sa_family != AF_BLUETOOTH)
1007 return -EINVAL;
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
1013 if (la.l2_cid)
1014 return -EINVAL;
1016 lock_sock(sk);
1018 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1019 err = -EINVAL;
1020 goto done;
1023 switch (l2cap_pi(sk)->mode) {
1024 case L2CAP_MODE_BASIC:
1025 break;
1026 case L2CAP_MODE_ERTM:
1027 case L2CAP_MODE_STREAMING:
1028 if (enable_ertm)
1029 break;
1030 /* fall through */
1031 default:
1032 err = -ENOTSUPP;
1033 goto done;
1036 switch (sk->sk_state) {
1037 case BT_CONNECT:
1038 case BT_CONNECT2:
1039 case BT_CONFIG:
1040 /* Already connecting */
1041 goto wait;
1043 case BT_CONNECTED:
1044 /* Already connected */
1045 goto done;
1047 case BT_OPEN:
1048 case BT_BOUND:
1049 /* Can connect */
1050 break;
1052 default:
1053 err = -EBADFD;
1054 goto done;
1057 /* Set destination address and psm */
1058 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1059 l2cap_pi(sk)->psm = la.l2_psm;
1061 err = l2cap_do_connect(sk);
1062 if (err)
1063 goto done;
1065 wait:
1066 err = bt_sock_wait_state(sk, BT_CONNECTED,
1067 sock_sndtimeo(sk, flags & O_NONBLOCK));
1068 done:
1069 release_sock(sk);
1070 return err;
1073 static int l2cap_sock_listen(struct socket *sock, int backlog)
1075 struct sock *sk = sock->sk;
1076 int err = 0;
1078 BT_DBG("sk %p backlog %d", sk, backlog);
1080 lock_sock(sk);
1082 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1083 err = -EBADFD;
1084 goto done;
1087 switch (l2cap_pi(sk)->mode) {
1088 case L2CAP_MODE_BASIC:
1089 break;
1090 case L2CAP_MODE_ERTM:
1091 case L2CAP_MODE_STREAMING:
1092 if (enable_ertm)
1093 break;
1094 /* fall through */
1095 default:
1096 err = -ENOTSUPP;
1097 goto done;
1100 if (!l2cap_pi(sk)->psm) {
1101 bdaddr_t *src = &bt_sk(sk)->src;
1102 u16 psm;
1104 err = -EINVAL;
1106 write_lock_bh(&l2cap_sk_list.lock);
1108 for (psm = 0x1001; psm < 0x1100; psm += 2)
1109 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1110 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1111 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1112 err = 0;
1113 break;
1116 write_unlock_bh(&l2cap_sk_list.lock);
1118 if (err < 0)
1119 goto done;
1122 sk->sk_max_ack_backlog = backlog;
1123 sk->sk_ack_backlog = 0;
1124 sk->sk_state = BT_LISTEN;
1126 done:
1127 release_sock(sk);
1128 return err;
1131 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1133 DECLARE_WAITQUEUE(wait, current);
1134 struct sock *sk = sock->sk, *nsk;
1135 long timeo;
1136 int err = 0;
1138 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1140 if (sk->sk_state != BT_LISTEN) {
1141 err = -EBADFD;
1142 goto done;
1145 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1147 BT_DBG("sk %p timeo %ld", sk, timeo);
1149 /* Wait for an incoming connection. (wake-one). */
1150 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1152 set_current_state(TASK_INTERRUPTIBLE);
1153 if (!timeo) {
1154 err = -EAGAIN;
1155 break;
1158 release_sock(sk);
1159 timeo = schedule_timeout(timeo);
1160 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1162 if (sk->sk_state != BT_LISTEN) {
1163 err = -EBADFD;
1164 break;
1167 if (signal_pending(current)) {
1168 err = sock_intr_errno(timeo);
1169 break;
1172 set_current_state(TASK_RUNNING);
1173 remove_wait_queue(sk_sleep(sk), &wait);
1175 if (err)
1176 goto done;
1178 newsock->state = SS_CONNECTED;
1180 BT_DBG("new socket %p", nsk);
1182 done:
1183 release_sock(sk);
1184 return err;
1187 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1189 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1190 struct sock *sk = sock->sk;
1192 BT_DBG("sock %p, sk %p", sock, sk);
1194 addr->sa_family = AF_BLUETOOTH;
1195 *len = sizeof(struct sockaddr_l2);
1197 if (peer) {
1198 la->l2_psm = l2cap_pi(sk)->psm;
1199 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1200 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1201 } else {
1202 la->l2_psm = l2cap_pi(sk)->sport;
1203 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1204 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1207 return 0;
1210 static void l2cap_monitor_timeout(unsigned long arg)
1212 struct sock *sk = (void *) arg;
1213 u16 control;
1215 bh_lock_sock(sk);
1216 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1217 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1218 bh_unlock_sock(sk);
1219 return;
1222 l2cap_pi(sk)->retry_count++;
1223 __mod_monitor_timer();
1225 control = L2CAP_CTRL_POLL;
1226 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1227 bh_unlock_sock(sk);
1230 static void l2cap_retrans_timeout(unsigned long arg)
1232 struct sock *sk = (void *) arg;
1233 u16 control;
1235 bh_lock_sock(sk);
1236 l2cap_pi(sk)->retry_count = 1;
1237 __mod_monitor_timer();
1239 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1241 control = L2CAP_CTRL_POLL;
1242 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1243 bh_unlock_sock(sk);
1246 static void l2cap_drop_acked_frames(struct sock *sk)
1248 struct sk_buff *skb;
1250 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1251 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1252 break;
1254 skb = skb_dequeue(TX_QUEUE(sk));
1255 kfree_skb(skb);
1257 l2cap_pi(sk)->unacked_frames--;
1260 if (!l2cap_pi(sk)->unacked_frames)
1261 del_timer(&l2cap_pi(sk)->retrans_timer);
1263 return;
1266 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1268 struct l2cap_pinfo *pi = l2cap_pi(sk);
1269 int err;
1271 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1273 err = hci_send_acl(pi->conn->hcon, skb, 0);
1274 if (err < 0)
1275 kfree_skb(skb);
1277 return err;
1280 static int l2cap_streaming_send(struct sock *sk)
1282 struct sk_buff *skb, *tx_skb;
1283 struct l2cap_pinfo *pi = l2cap_pi(sk);
1284 u16 control, fcs;
1285 int err;
1287 while ((skb = sk->sk_send_head)) {
1288 tx_skb = skb_clone(skb, GFP_ATOMIC);
1290 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1291 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1292 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1294 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1295 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1296 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1299 err = l2cap_do_send(sk, tx_skb);
1300 if (err < 0) {
1301 l2cap_send_disconn_req(pi->conn, sk);
1302 return err;
1305 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1307 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1308 sk->sk_send_head = NULL;
1309 else
1310 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1312 skb = skb_dequeue(TX_QUEUE(sk));
1313 kfree_skb(skb);
1315 return 0;
1318 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1320 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 struct sk_buff *skb, *tx_skb;
1322 u16 control, fcs;
1323 int err;
1325 skb = skb_peek(TX_QUEUE(sk));
1326 do {
1327 if (bt_cb(skb)->tx_seq != tx_seq) {
1328 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1329 break;
1330 skb = skb_queue_next(TX_QUEUE(sk), skb);
1331 continue;
1334 if (pi->remote_max_tx &&
1335 bt_cb(skb)->retries == pi->remote_max_tx) {
1336 l2cap_send_disconn_req(pi->conn, sk);
1337 break;
1340 tx_skb = skb_clone(skb, GFP_ATOMIC);
1341 bt_cb(skb)->retries++;
1342 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1343 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1344 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1345 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1347 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1348 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1349 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1352 err = l2cap_do_send(sk, tx_skb);
1353 if (err < 0) {
1354 l2cap_send_disconn_req(pi->conn, sk);
1355 return err;
1357 break;
1358 } while(1);
1359 return 0;
1362 static int l2cap_ertm_send(struct sock *sk)
1364 struct sk_buff *skb, *tx_skb;
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1366 u16 control, fcs;
1367 int err;
1369 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1370 return 0;
1372 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1373 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1375 if (pi->remote_max_tx &&
1376 bt_cb(skb)->retries == pi->remote_max_tx) {
1377 l2cap_send_disconn_req(pi->conn, sk);
1378 break;
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1383 bt_cb(skb)->retries++;
1385 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1386 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1387 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1388 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1391 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1392 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1393 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1396 err = l2cap_do_send(sk, tx_skb);
1397 if (err < 0) {
1398 l2cap_send_disconn_req(pi->conn, sk);
1399 return err;
1401 __mod_retrans_timer();
1403 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1404 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1406 pi->unacked_frames++;
1408 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1409 sk->sk_send_head = NULL;
1410 else
1411 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1414 return 0;
1417 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1419 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1420 struct sk_buff **frag;
1421 int err, sent = 0;
1423 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1424 return -EFAULT;
1427 sent += count;
1428 len -= count;
1430 /* Continuation fragments (no L2CAP header) */
1431 frag = &skb_shinfo(skb)->frag_list;
1432 while (len) {
1433 count = min_t(unsigned int, conn->mtu, len);
1435 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1436 if (!*frag)
1437 return -EFAULT;
1438 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1439 return -EFAULT;
1441 sent += count;
1442 len -= count;
1444 frag = &(*frag)->next;
1447 return sent;
1450 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1452 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1453 struct sk_buff *skb;
1454 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1455 struct l2cap_hdr *lh;
1457 BT_DBG("sk %p len %d", sk, (int)len);
1459 count = min_t(unsigned int, (conn->mtu - hlen), len);
1460 skb = bt_skb_send_alloc(sk, count + hlen,
1461 msg->msg_flags & MSG_DONTWAIT, &err);
1462 if (!skb)
1463 return ERR_PTR(-ENOMEM);
1465 /* Create L2CAP header */
1466 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1467 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1468 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1469 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1471 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1472 if (unlikely(err < 0)) {
1473 kfree_skb(skb);
1474 return ERR_PTR(err);
1476 return skb;
1479 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1481 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1482 struct sk_buff *skb;
1483 int err, count, hlen = L2CAP_HDR_SIZE;
1484 struct l2cap_hdr *lh;
1486 BT_DBG("sk %p len %d", sk, (int)len);
1488 count = min_t(unsigned int, (conn->mtu - hlen), len);
1489 skb = bt_skb_send_alloc(sk, count + hlen,
1490 msg->msg_flags & MSG_DONTWAIT, &err);
1491 if (!skb)
1492 return ERR_PTR(-ENOMEM);
1494 /* Create L2CAP header */
1495 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1496 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1497 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1499 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1500 if (unlikely(err < 0)) {
1501 kfree_skb(skb);
1502 return ERR_PTR(err);
1504 return skb;
1507 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1509 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1510 struct sk_buff *skb;
1511 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1512 struct l2cap_hdr *lh;
1514 BT_DBG("sk %p len %d", sk, (int)len);
1516 if (sdulen)
1517 hlen += 2;
1519 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1520 hlen += 2;
1522 count = min_t(unsigned int, (conn->mtu - hlen), len);
1523 skb = bt_skb_send_alloc(sk, count + hlen,
1524 msg->msg_flags & MSG_DONTWAIT, &err);
1525 if (!skb)
1526 return ERR_PTR(-ENOMEM);
1528 /* Create L2CAP header */
1529 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1530 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1531 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1532 put_unaligned_le16(control, skb_put(skb, 2));
1533 if (sdulen)
1534 put_unaligned_le16(sdulen, skb_put(skb, 2));
1536 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1537 if (unlikely(err < 0)) {
1538 kfree_skb(skb);
1539 return ERR_PTR(err);
1542 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1543 put_unaligned_le16(0, skb_put(skb, 2));
1545 bt_cb(skb)->retries = 0;
1546 return skb;
1549 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
/* Segment an SDU larger than max_pdu_size into a chain of I-frames
 * (SAR start / continue / end) and append the chain to the TX queue.
 * Returns the number of payload bytes queued or a negative errno. */
1551 struct l2cap_pinfo *pi = l2cap_pi(sk);
1552 struct sk_buff *skb;
1553 struct sk_buff_head sar_queue;
1554 u16 control;
1555 size_t size = 0;
1557 __skb_queue_head_init(&sar_queue);
/* The first PDU is SAR=START and carries the total SDU length. */
1558 control = L2CAP_SDU_START;
1559 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1560 if (IS_ERR(skb))
1561 return PTR_ERR(skb);
1563 __skb_queue_tail(&sar_queue, skb);
1564 len -= pi->max_pdu_size;
1565 size +=pi->max_pdu_size;
1566 control = 0;
1568 while (len > 0) {
1569 size_t buflen;
/* Middle fragments are CONTINUE; the final one is END. */
1571 if (len > pi->max_pdu_size) {
1572 control |= L2CAP_SDU_CONTINUE;
1573 buflen = pi->max_pdu_size;
1574 } else {
1575 control |= L2CAP_SDU_END;
1576 buflen = len;
1579 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1580 if (IS_ERR(skb)) {
/* Drop every fragment queued so far for this SDU. */
1581 skb_queue_purge(&sar_queue);
1582 return PTR_ERR(skb);
1585 __skb_queue_tail(&sar_queue, skb);
1586 len -= buflen;
1587 size += buflen;
1588 control = 0;
/* Splice the whole chain onto the TX queue in one step; sar_queue.next
 * still points at the first spliced skb afterwards. */
1590 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1591 if (sk->sk_send_head == NULL)
1592 sk->sk_send_head = sar_queue.next;
1594 return size;
1597 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1599 struct sock *sk = sock->sk;
1600 struct l2cap_pinfo *pi = l2cap_pi(sk);
1601 struct sk_buff *skb;
1602 u16 control;
1603 int err;
1605 BT_DBG("sock %p, sk %p", sock, sk);
1607 err = sock_error(sk);
1608 if (err)
1609 return err;
1611 if (msg->msg_flags & MSG_OOB)
1612 return -EOPNOTSUPP;
1614 /* Check outgoing MTU */
1615 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
1616 len > pi->omtu)
1617 return -EINVAL;
1619 lock_sock(sk);
1621 if (sk->sk_state != BT_CONNECTED) {
1622 err = -ENOTCONN;
1623 goto done;
1626 /* Connectionless channel */
1627 if (sk->sk_type == SOCK_DGRAM) {
1628 skb = l2cap_create_connless_pdu(sk, msg, len);
1629 err = l2cap_do_send(sk, skb);
1630 goto done;
1633 switch (pi->mode) {
1634 case L2CAP_MODE_BASIC:
1635 /* Create a basic PDU */
1636 skb = l2cap_create_basic_pdu(sk, msg, len);
1637 if (IS_ERR(skb)) {
1638 err = PTR_ERR(skb);
1639 goto done;
1642 err = l2cap_do_send(sk, skb);
1643 if (!err)
1644 err = len;
1645 break;
1647 case L2CAP_MODE_ERTM:
1648 case L2CAP_MODE_STREAMING:
1649 /* Entire SDU fits into one PDU */
1650 if (len <= pi->max_pdu_size) {
1651 control = L2CAP_SDU_UNSEGMENTED;
1652 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1653 if (IS_ERR(skb)) {
1654 err = PTR_ERR(skb);
1655 goto done;
1657 __skb_queue_tail(TX_QUEUE(sk), skb);
1658 if (sk->sk_send_head == NULL)
1659 sk->sk_send_head = skb;
1660 } else {
1661 /* Segment SDU into multiples PDUs */
1662 err = l2cap_sar_segment_sdu(sk, msg, len);
1663 if (err < 0)
1664 goto done;
1667 if (pi->mode == L2CAP_MODE_STREAMING)
1668 err = l2cap_streaming_send(sk);
1669 else
1670 err = l2cap_ertm_send(sk);
1672 if (!err)
1673 err = len;
1674 break;
1676 default:
1677 BT_DBG("bad state %1.1x", pi->mode);
1678 err = -EINVAL;
1681 done:
1682 release_sock(sk);
1683 return err;
1686 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1688 struct sock *sk = sock->sk;
1690 lock_sock(sk);
1692 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1693 struct l2cap_conn_rsp rsp;
1695 sk->sk_state = BT_CONFIG;
1697 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1698 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1699 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1700 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1701 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1702 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1704 release_sock(sk);
1705 return 0;
1708 release_sock(sk);
1710 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1713 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1715 struct sock *sk = sock->sk;
1716 struct l2cap_options opts;
1717 int len, err = 0;
1718 u32 opt;
1720 BT_DBG("sk %p", sk);
1722 lock_sock(sk);
1724 switch (optname) {
1725 case L2CAP_OPTIONS:
1726 opts.imtu = l2cap_pi(sk)->imtu;
1727 opts.omtu = l2cap_pi(sk)->omtu;
1728 opts.flush_to = l2cap_pi(sk)->flush_to;
1729 opts.mode = l2cap_pi(sk)->mode;
1730 opts.fcs = l2cap_pi(sk)->fcs;
1732 len = min_t(unsigned int, sizeof(opts), optlen);
1733 if (copy_from_user((char *) &opts, optval, len)) {
1734 err = -EFAULT;
1735 break;
1738 l2cap_pi(sk)->imtu = opts.imtu;
1739 l2cap_pi(sk)->omtu = opts.omtu;
1740 l2cap_pi(sk)->mode = opts.mode;
1741 l2cap_pi(sk)->fcs = opts.fcs;
1742 break;
1744 case L2CAP_LM:
1745 if (get_user(opt, (u32 __user *) optval)) {
1746 err = -EFAULT;
1747 break;
1750 if (opt & L2CAP_LM_AUTH)
1751 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1752 if (opt & L2CAP_LM_ENCRYPT)
1753 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1754 if (opt & L2CAP_LM_SECURE)
1755 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1757 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1758 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1759 break;
1761 default:
1762 err = -ENOPROTOOPT;
1763 break;
1766 release_sock(sk);
1767 return err;
1770 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1772 struct sock *sk = sock->sk;
1773 struct bt_security sec;
1774 int len, err = 0;
1775 u32 opt;
1777 BT_DBG("sk %p", sk);
1779 if (level == SOL_L2CAP)
1780 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1782 if (level != SOL_BLUETOOTH)
1783 return -ENOPROTOOPT;
1785 lock_sock(sk);
1787 switch (optname) {
1788 case BT_SECURITY:
1789 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1790 err = -EINVAL;
1791 break;
1794 sec.level = BT_SECURITY_LOW;
1796 len = min_t(unsigned int, sizeof(sec), optlen);
1797 if (copy_from_user((char *) &sec, optval, len)) {
1798 err = -EFAULT;
1799 break;
1802 if (sec.level < BT_SECURITY_LOW ||
1803 sec.level > BT_SECURITY_HIGH) {
1804 err = -EINVAL;
1805 break;
1808 l2cap_pi(sk)->sec_level = sec.level;
1809 break;
1811 case BT_DEFER_SETUP:
1812 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1813 err = -EINVAL;
1814 break;
1817 if (get_user(opt, (u32 __user *) optval)) {
1818 err = -EFAULT;
1819 break;
1822 bt_sk(sk)->defer_setup = opt;
1823 break;
1825 default:
1826 err = -ENOPROTOOPT;
1827 break;
1830 release_sock(sk);
1831 return err;
1834 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1836 struct sock *sk = sock->sk;
1837 struct l2cap_options opts;
1838 struct l2cap_conninfo cinfo;
1839 int len, err = 0;
1840 u32 opt;
1842 BT_DBG("sk %p", sk);
1844 if (get_user(len, optlen))
1845 return -EFAULT;
1847 lock_sock(sk);
1849 switch (optname) {
1850 case L2CAP_OPTIONS:
1851 opts.imtu = l2cap_pi(sk)->imtu;
1852 opts.omtu = l2cap_pi(sk)->omtu;
1853 opts.flush_to = l2cap_pi(sk)->flush_to;
1854 opts.mode = l2cap_pi(sk)->mode;
1855 opts.fcs = l2cap_pi(sk)->fcs;
1857 len = min_t(unsigned int, len, sizeof(opts));
1858 if (copy_to_user(optval, (char *) &opts, len))
1859 err = -EFAULT;
1861 break;
1863 case L2CAP_LM:
1864 switch (l2cap_pi(sk)->sec_level) {
1865 case BT_SECURITY_LOW:
1866 opt = L2CAP_LM_AUTH;
1867 break;
1868 case BT_SECURITY_MEDIUM:
1869 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1870 break;
1871 case BT_SECURITY_HIGH:
1872 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1873 L2CAP_LM_SECURE;
1874 break;
1875 default:
1876 opt = 0;
1877 break;
1880 if (l2cap_pi(sk)->role_switch)
1881 opt |= L2CAP_LM_MASTER;
1883 if (l2cap_pi(sk)->force_reliable)
1884 opt |= L2CAP_LM_RELIABLE;
1886 if (put_user(opt, (u32 __user *) optval))
1887 err = -EFAULT;
1888 break;
1890 case L2CAP_CONNINFO:
1891 if (sk->sk_state != BT_CONNECTED &&
1892 !(sk->sk_state == BT_CONNECT2 &&
1893 bt_sk(sk)->defer_setup)) {
1894 err = -ENOTCONN;
1895 break;
1898 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1899 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1901 len = min_t(unsigned int, len, sizeof(cinfo));
1902 if (copy_to_user(optval, (char *) &cinfo, len))
1903 err = -EFAULT;
1905 break;
1907 default:
1908 err = -ENOPROTOOPT;
1909 break;
1912 release_sock(sk);
1913 return err;
1916 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1918 struct sock *sk = sock->sk;
1919 struct bt_security sec;
1920 int len, err = 0;
1922 BT_DBG("sk %p", sk);
1924 if (level == SOL_L2CAP)
1925 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1927 if (level != SOL_BLUETOOTH)
1928 return -ENOPROTOOPT;
1930 if (get_user(len, optlen))
1931 return -EFAULT;
1933 lock_sock(sk);
1935 switch (optname) {
1936 case BT_SECURITY:
1937 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1938 err = -EINVAL;
1939 break;
1942 sec.level = l2cap_pi(sk)->sec_level;
1944 len = min_t(unsigned int, len, sizeof(sec));
1945 if (copy_to_user(optval, (char *) &sec, len))
1946 err = -EFAULT;
1948 break;
1950 case BT_DEFER_SETUP:
1951 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1952 err = -EINVAL;
1953 break;
1956 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1957 err = -EFAULT;
1959 break;
1961 default:
1962 err = -ENOPROTOOPT;
1963 break;
1966 release_sock(sk);
1967 return err;
1970 static int l2cap_sock_shutdown(struct socket *sock, int how)
1972 struct sock *sk = sock->sk;
1973 int err = 0;
1975 BT_DBG("sock %p, sk %p", sock, sk);
1977 if (!sk)
1978 return 0;
1980 lock_sock(sk);
1981 if (!sk->sk_shutdown) {
1982 sk->sk_shutdown = SHUTDOWN_MASK;
1983 l2cap_sock_clear_timer(sk);
1984 __l2cap_sock_close(sk, 0);
1986 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1987 err = bt_sock_wait_state(sk, BT_CLOSED,
1988 sk->sk_lingertime);
1990 release_sock(sk);
1991 return err;
1994 static int l2cap_sock_release(struct socket *sock)
1996 struct sock *sk = sock->sk;
1997 int err;
1999 BT_DBG("sock %p, sk %p", sock, sk);
2001 if (!sk)
2002 return 0;
2004 err = l2cap_sock_shutdown(sock, 2);
2006 sock_orphan(sk);
2007 l2cap_sock_kill(sk);
2008 return err;
2011 static void l2cap_chan_ready(struct sock *sk)
2013 struct sock *parent = bt_sk(sk)->parent;
2015 BT_DBG("sk %p, parent %p", sk, parent);
2017 l2cap_pi(sk)->conf_state = 0;
2018 l2cap_sock_clear_timer(sk);
2020 if (!parent) {
2021 /* Outgoing channel.
2022 * Wake up socket sleeping on connect.
2024 sk->sk_state = BT_CONNECTED;
2025 sk->sk_state_change(sk);
2026 } else {
2027 /* Incoming channel.
2028 * Wake up socket sleeping on accept.
2030 parent->sk_data_ready(parent, 0);
2034 /* Copy frame to all raw sockets on that connection */
2035 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2037 struct l2cap_chan_list *l = &conn->chan_list;
2038 struct sk_buff *nskb;
2039 struct sock *sk;
2041 BT_DBG("conn %p", conn);
2043 read_lock(&l->lock);
2044 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2045 if (sk->sk_type != SOCK_RAW)
2046 continue;
2048 /* Don't send frame to the socket it came from */
2049 if (skb->sk == sk)
2050 continue;
2051 nskb = skb_clone(skb, GFP_ATOMIC);
2052 if (!nskb)
2053 continue;
2055 if (sock_queue_rcv_skb(sk, nskb))
2056 kfree_skb(nskb);
2058 read_unlock(&l->lock);
2061 /* ---- L2CAP signalling commands ---- */
2062 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2063 u8 code, u8 ident, u16 dlen, void *data)
/* Build an L2CAP signalling PDU (header + command header + @dlen bytes
 * of @data), splitting payload that exceeds the ACL MTU into
 * continuation fragments on frag_list.  Returns NULL on allocation
 * failure (partially built skb is freed). */
2065 struct sk_buff *skb, **frag;
2066 struct l2cap_cmd_hdr *cmd;
2067 struct l2cap_hdr *lh;
2068 int len, count;
2070 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2071 conn, code, ident, dlen);
2073 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2074 count = min_t(unsigned int, conn->mtu, len);
2076 skb = bt_skb_alloc(count, GFP_ATOMIC);
2077 if (!skb)
2078 return NULL;
/* Signalling traffic always goes on the fixed signalling CID. */
2080 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2081 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2082 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2084 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2085 cmd->code = code;
2086 cmd->ident = ident;
2087 cmd->len = cpu_to_le16(dlen);
/* First skb: whatever payload fits after the two headers. */
2089 if (dlen) {
2090 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2091 memcpy(skb_put(skb, count), data, count);
2092 data += count;
2095 len -= skb->len;
2097 /* Continuation fragments (no L2CAP header) */
2098 frag = &skb_shinfo(skb)->frag_list;
2099 while (len) {
2100 count = min_t(unsigned int, conn->mtu, len);
2102 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2103 if (!*frag)
2104 goto fail;
2106 memcpy(skb_put(*frag, count), data, count);
2108 len -= count;
2109 data += count;
2111 frag = &(*frag)->next;
2114 return skb;
2116 fail:
/* Frees the fragments already chained on skb as well. */
2117 kfree_skb(skb);
2118 return NULL;
2121 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2123 struct l2cap_conf_opt *opt = *ptr;
2124 int len;
2126 len = L2CAP_CONF_OPT_SIZE + opt->len;
2127 *ptr += len;
2129 *type = opt->type;
2130 *olen = opt->len;
2132 switch (opt->len) {
2133 case 1:
2134 *val = *((u8 *) opt->val);
2135 break;
2137 case 2:
2138 *val = __le16_to_cpu(*((__le16 *) opt->val));
2139 break;
2141 case 4:
2142 *val = __le32_to_cpu(*((__le32 *) opt->val));
2143 break;
2145 default:
2146 *val = (unsigned long) opt->val;
2147 break;
2150 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2151 return len;
2154 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2156 struct l2cap_conf_opt *opt = *ptr;
2158 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2160 opt->type = type;
2161 opt->len = len;
2163 switch (len) {
2164 case 1:
2165 *((u8 *) opt->val) = val;
2166 break;
2168 case 2:
2169 *((__le16 *) opt->val) = cpu_to_le16(val);
2170 break;
2172 case 4:
2173 *((__le32 *) opt->val) = cpu_to_le32(val);
2174 break;
2176 default:
2177 memcpy(opt->val, (void *) val, len);
2178 break;
2181 *ptr += L2CAP_CONF_OPT_SIZE + len;
2184 static inline void l2cap_ertm_init(struct sock *sk)
2186 l2cap_pi(sk)->expected_ack_seq = 0;
2187 l2cap_pi(sk)->unacked_frames = 0;
2188 l2cap_pi(sk)->buffer_seq = 0;
2189 l2cap_pi(sk)->num_to_ack = 0;
2191 setup_timer(&l2cap_pi(sk)->retrans_timer,
2192 l2cap_retrans_timeout, (unsigned long) sk);
2193 setup_timer(&l2cap_pi(sk)->monitor_timer,
2194 l2cap_monitor_timeout, (unsigned long) sk);
2196 __skb_queue_head_init(SREJ_QUEUE(sk));
2199 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2201 u32 local_feat_mask = l2cap_feat_mask;
2202 if (enable_ertm)
2203 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2205 switch (mode) {
2206 case L2CAP_MODE_ERTM:
2207 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2208 case L2CAP_MODE_STREAMING:
2209 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2210 default:
2211 return 0x00;
2215 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2217 switch (mode) {
2218 case L2CAP_MODE_STREAMING:
2219 case L2CAP_MODE_ERTM:
2220 if (l2cap_mode_supported(mode, remote_feat_mask))
2221 return mode;
2222 /* fall through */
2223 default:
2224 return L2CAP_MODE_BASIC;
2228 static int l2cap_build_conf_req(struct sock *sk, void *data)
/* Build our outgoing configuration request into @data: select the
 * channel mode on the first request, then append MTU/RFC/FCS options
 * as appropriate.  Returns the request length in bytes. */
2230 struct l2cap_pinfo *pi = l2cap_pi(sk);
2231 struct l2cap_conf_req *req = data;
2232 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2233 void *ptr = req->data;
2235 BT_DBG("sk %p", sk);
/* Mode selection only happens on the very first config exchange. */
2237 if (pi->num_conf_req || pi->num_conf_rsp)
2238 goto done;
2240 switch (pi->mode) {
2241 case L2CAP_MODE_STREAMING:
2242 case L2CAP_MODE_ERTM:
2243 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2244 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2245 l2cap_send_disconn_req(pi->conn, sk);
2246 break;
2247 default:
2248 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2249 break;
2252 done:
2253 switch (pi->mode) {
2254 case L2CAP_MODE_BASIC:
/* Basic mode: only advertise a non-default incoming MTU. */
2255 if (pi->imtu != L2CAP_DEFAULT_MTU)
2256 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2257 break;
2259 case L2CAP_MODE_ERTM:
2260 rfc.mode = L2CAP_MODE_ERTM;
2261 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2262 rfc.max_transmit = max_transmit;
/* Timeouts are set by the side that acknowledges, hence 0 here. */
2263 rfc.retrans_timeout = 0;
2264 rfc.monitor_timeout = 0;
2265 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2267 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2268 sizeof(rfc), (unsigned long) &rfc);
2270 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2271 break;
/* Offer to drop the FCS when allowed and the peer agrees. */
2273 if (pi->fcs == L2CAP_FCS_NONE ||
2274 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2275 pi->fcs = L2CAP_FCS_NONE;
2276 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2278 break;
2280 case L2CAP_MODE_STREAMING:
2281 rfc.mode = L2CAP_MODE_STREAMING;
2282 rfc.txwin_size = 0;
2283 rfc.max_transmit = 0;
2284 rfc.retrans_timeout = 0;
2285 rfc.monitor_timeout = 0;
2286 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2289 sizeof(rfc), (unsigned long) &rfc);
2291 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2292 break;
2294 if (pi->fcs == L2CAP_FCS_NONE ||
2295 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2296 pi->fcs = L2CAP_FCS_NONE;
2297 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2299 break;
2302 /* FIXME: Need actual value of the flush timeout */
2303 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2304 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2306 req->dcid = cpu_to_le16(pi->dcid);
2307 req->flags = cpu_to_le16(0);
2309 return ptr - data;
2312 static int l2cap_parse_conf_req(struct sock *sk, void *data)
/* Parse the peer's buffered configuration request (pi->conf_req) and
 * build our response into @data.  Returns the response length in
 * bytes, or -ECONNREFUSED when the requested mode is unworkable. */
2314 struct l2cap_pinfo *pi = l2cap_pi(sk);
2315 struct l2cap_conf_rsp *rsp = data;
2316 void *ptr = rsp->data;
2317 void *req = pi->conf_req;
2318 int len = pi->conf_len;
2319 int type, hint, olen;
2320 unsigned long val;
2321 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2322 u16 mtu = L2CAP_DEFAULT_MTU;
2323 u16 result = L2CAP_CONF_SUCCESS;
2325 BT_DBG("sk %p", sk);
/* First pass: decode every option the peer sent. */
2327 while (len >= L2CAP_CONF_OPT_SIZE) {
2328 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; unknown non-hints must be rejected. */
2330 hint = type & L2CAP_CONF_HINT;
2331 type &= L2CAP_CONF_MASK;
2333 switch (type) {
2334 case L2CAP_CONF_MTU:
2335 mtu = val;
2336 break;
2338 case L2CAP_CONF_FLUSH_TO:
2339 pi->flush_to = val;
2340 break;
2342 case L2CAP_CONF_QOS:
2343 break;
2345 case L2CAP_CONF_RFC:
2346 if (olen == sizeof(rfc))
2347 memcpy(&rfc, (void *) val, olen);
2348 break;
2350 case L2CAP_CONF_FCS:
2351 if (val == L2CAP_FCS_NONE)
2352 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2354 break;
2356 default:
2357 if (hint)
2358 break;
/* List the unknown option types in the UNKNOWN response. */
2360 result = L2CAP_CONF_UNKNOWN;
2361 *((u8 *) ptr++) = type;
2362 break;
/* Mode selection only on the first config exchange. */
2366 if (pi->num_conf_rsp || pi->num_conf_req)
2367 goto done;
2369 switch (pi->mode) {
2370 case L2CAP_MODE_STREAMING:
2371 case L2CAP_MODE_ERTM:
2372 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2373 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2374 return -ECONNREFUSED;
2375 break;
2376 default:
2377 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2378 break;
2381 done:
/* Peer proposed a different mode: counter-propose ours once. */
2382 if (pi->mode != rfc.mode) {
2383 result = L2CAP_CONF_UNACCEPT;
2384 rfc.mode = pi->mode;
2386 if (pi->num_conf_rsp == 1)
2387 return -ECONNREFUSED;
2389 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2390 sizeof(rfc), (unsigned long) &rfc);
2394 if (result == L2CAP_CONF_SUCCESS) {
2395 /* Configure output options and let the other side know
2396 * which ones we don't like. */
2398 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2399 result = L2CAP_CONF_UNACCEPT;
2400 else {
2401 pi->omtu = mtu;
2402 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2404 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2406 switch (rfc.mode) {
2407 case L2CAP_MODE_BASIC:
2408 pi->fcs = L2CAP_FCS_NONE;
2409 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2410 break;
2412 case L2CAP_MODE_ERTM:
2413 pi->remote_tx_win = rfc.txwin_size;
2414 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): max_pdu_size is stored without le16_to_cpu here,
 * unlike l2cap_parse_conf_rsp() — confirm intended. */
2415 pi->max_pdu_size = rfc.max_pdu_size;
/* We are the acknowledging side, so we dictate the timeouts. */
2417 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2418 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2420 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2422 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2423 sizeof(rfc), (unsigned long) &rfc);
2425 break;
2427 case L2CAP_MODE_STREAMING:
2428 pi->remote_tx_win = rfc.txwin_size;
2429 pi->max_pdu_size = rfc.max_pdu_size;
2431 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2433 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2434 sizeof(rfc), (unsigned long) &rfc);
2436 break;
2438 default:
2439 result = L2CAP_CONF_UNACCEPT;
2441 memset(&rfc, 0, sizeof(rfc));
2442 rfc.mode = pi->mode;
2445 if (result == L2CAP_CONF_SUCCESS)
2446 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2448 rsp->scid = cpu_to_le16(pi->dcid);
2449 rsp->result = cpu_to_le16(result);
2450 rsp->flags = cpu_to_le16(0x0000);
2452 return ptr - data;
2455 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
/* Parse the peer's configuration response and build an adjusted
 * follow-up request into @data.  *@result carries the peer's result
 * code in and may be downgraded here.  Returns the new request length
 * or -ECONNREFUSED when the peer insists on an incompatible mode. */
2457 struct l2cap_pinfo *pi = l2cap_pi(sk);
2458 struct l2cap_conf_req *req = data;
2459 void *ptr = req->data;
2460 int type, olen;
2461 unsigned long val;
/* NOTE(review): rfc is not initialized; if the response contains no
 * (full-size) RFC option, the RFC branch below and the switch at the
 * end read indeterminate stack data — confirm/seed with defaults. */
2462 struct l2cap_conf_rfc rfc;
2464 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2466 while (len >= L2CAP_CONF_OPT_SIZE) {
2467 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2469 switch (type) {
2470 case L2CAP_CONF_MTU:
/* Never accept an outgoing MTU below the spec minimum. */
2471 if (val < L2CAP_DEFAULT_MIN_MTU) {
2472 *result = L2CAP_CONF_UNACCEPT;
2473 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2474 } else
2475 pi->omtu = val;
2476 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2477 break;
2479 case L2CAP_CONF_FLUSH_TO:
2480 pi->flush_to = val;
2481 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2482 2, pi->flush_to);
2483 break;
2485 case L2CAP_CONF_RFC:
2486 if (olen == sizeof(rfc))
2487 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not let the peer change the mode. */
2489 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2490 rfc.mode != pi->mode)
2491 return -ECONNREFUSED;
2493 pi->mode = rfc.mode;
2494 pi->fcs = 0;
2496 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2497 sizeof(rfc), (unsigned long) &rfc);
2498 break;
/* On success, adopt the peer's negotiated ERTM/streaming values. */
2502 if (*result == L2CAP_CONF_SUCCESS) {
2503 switch (rfc.mode) {
2504 case L2CAP_MODE_ERTM:
2505 pi->remote_tx_win = rfc.txwin_size;
2506 pi->retrans_timeout = rfc.retrans_timeout;
2507 pi->monitor_timeout = rfc.monitor_timeout;
2508 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2509 break;
2510 case L2CAP_MODE_STREAMING:
2511 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2512 break;
2516 req->dcid = cpu_to_le16(pi->dcid);
2517 req->flags = cpu_to_le16(0x0000);
2519 return ptr - data;
2522 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2524 struct l2cap_conf_rsp *rsp = data;
2525 void *ptr = rsp->data;
2527 BT_DBG("sk %p", sk);
2529 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2530 rsp->result = cpu_to_le16(result);
2531 rsp->flags = cpu_to_le16(flags);
2533 return ptr - data;
2536 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2538 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2540 if (rej->reason != 0x0000)
2541 return 0;
2543 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2544 cmd->ident == conn->info_ident) {
2545 del_timer(&conn->info_timer);
2547 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2548 conn->info_ident = 0;
2550 l2cap_conn_start(conn);
2553 return 0;
2556 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
/* Handle an incoming Connection Request: find a listener on the PSM,
 * enforce link security, allocate and register a child socket, and
 * answer with success/pending/refusal.  Always returns 0. */
2558 struct l2cap_chan_list *list = &conn->chan_list;
2559 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2560 struct l2cap_conn_rsp rsp;
2561 struct sock *sk, *parent;
2562 int result, status = L2CAP_CS_NO_INFO;
2564 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2565 __le16 psm = req->psm;
2567 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2569 /* Check if we have socket listening on psm */
/* NOTE(review): parent is presumably returned bh-locked by
 * l2cap_get_sock_by_psm(); it is unlocked at the response label. */
2570 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2571 if (!parent) {
2572 result = L2CAP_CR_BAD_PSM;
2573 goto sendresp;
2576 /* Check if the ACL is secure enough (if not SDP) */
2577 if (psm != cpu_to_le16(0x0001) &&
2578 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: security block disconnect reason */
2579 conn->disc_reason = 0x05;
2580 result = L2CAP_CR_SEC_BLOCK;
2581 goto response;
2584 result = L2CAP_CR_NO_MEM;
2586 /* Check for backlog size */
2587 if (sk_acceptq_is_full(parent)) {
2588 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2589 goto response;
2592 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2593 if (!sk)
2594 goto response;
2596 write_lock_bh(&list->lock);
2598 /* Check if we already have channel with that dcid */
2599 if (__l2cap_get_chan_by_dcid(list, scid)) {
2600 write_unlock_bh(&list->lock);
2601 sock_set_flag(sk, SOCK_ZAPPED);
2602 l2cap_sock_kill(sk);
2603 goto response;
2606 hci_conn_hold(conn->hcon);
2608 l2cap_sock_init(sk, parent);
2609 bacpy(&bt_sk(sk)->src, conn->src);
2610 bacpy(&bt_sk(sk)->dst, conn->dst);
2611 l2cap_pi(sk)->psm = psm;
2612 l2cap_pi(sk)->dcid = scid;
2614 __l2cap_chan_add(conn, sk, parent);
/* Our freshly assigned source CID becomes the peer's DCID. */
2615 dcid = l2cap_pi(sk)->scid;
2617 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2619 l2cap_pi(sk)->ident = cmd->ident;
/* Decide the response based on feature-mask state and security. */
2621 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2622 if (l2cap_check_security(sk)) {
2623 if (bt_sk(sk)->defer_setup) {
2624 sk->sk_state = BT_CONNECT2;
2625 result = L2CAP_CR_PEND;
2626 status = L2CAP_CS_AUTHOR_PEND;
2627 parent->sk_data_ready(parent, 0);
2628 } else {
2629 sk->sk_state = BT_CONFIG;
2630 result = L2CAP_CR_SUCCESS;
2631 status = L2CAP_CS_NO_INFO;
2633 } else {
2634 sk->sk_state = BT_CONNECT2;
2635 result = L2CAP_CR_PEND;
2636 status = L2CAP_CS_AUTHEN_PEND;
2638 } else {
2639 sk->sk_state = BT_CONNECT2;
2640 result = L2CAP_CR_PEND;
2641 status = L2CAP_CS_NO_INFO;
2644 write_unlock_bh(&list->lock);
2646 response:
2647 bh_unlock_sock(parent);
2649 sendresp:
2650 rsp.scid = cpu_to_le16(scid);
2651 rsp.dcid = cpu_to_le16(dcid);
2652 rsp.result = cpu_to_le16(result);
2653 rsp.status = cpu_to_le16(status);
2654 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info yet: kick off the feature mask exchange. */
2656 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2657 struct l2cap_info_req info;
2658 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2660 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2661 conn->info_ident = l2cap_get_ident(conn);
2663 mod_timer(&conn->info_timer, jiffies +
2664 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2666 l2cap_send_cmd(conn, conn->info_ident,
2667 L2CAP_INFO_REQ, sizeof(info), &info);
2670 return 0;
2673 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
/* Handle an incoming Connection Response: locate the channel (by SCID,
 * or by command ident when the peer sent no SCID), then either start
 * configuration, mark the connect pending, or tear the channel down.
 * Always returns 0. */
2675 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2676 u16 scid, dcid, result, status;
2677 struct sock *sk;
2678 u8 req[128];
2680 scid = __le16_to_cpu(rsp->scid);
2681 dcid = __le16_to_cpu(rsp->dcid);
2682 result = __le16_to_cpu(rsp->result);
2683 status = __le16_to_cpu(rsp->status);
2685 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* NOTE(review): the lookup helpers presumably return the sock
 * bh-locked; it is unlocked at the end — confirm. */
2687 if (scid) {
2688 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2689 if (!sk)
2690 return 0;
2691 } else {
2692 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2693 if (!sk)
2694 return 0;
2697 switch (result) {
2698 case L2CAP_CR_SUCCESS:
/* Connection established: send our first configuration request. */
2699 sk->sk_state = BT_CONFIG;
2700 l2cap_pi(sk)->ident = 0;
2701 l2cap_pi(sk)->dcid = dcid;
2702 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2704 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2706 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2707 l2cap_build_conf_req(sk, req), req);
2708 l2cap_pi(sk)->num_conf_req++;
2709 break;
2711 case L2CAP_CR_PEND:
2712 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2713 break;
2715 default:
/* Any other result is a refusal: drop the channel. */
2716 l2cap_chan_del(sk, ECONNREFUSED);
2717 break;
2720 bh_unlock_sock(sk);
2721 return 0;
/* Handle an incoming L2CAP Configure Request.
 *
 * Config options may span several requests (continuation flag 0x0001 in
 * 'flags'); partial option data is accumulated in the per-channel
 * conf_req buffer until a request without the flag completes the set.
 * The complete option list is then parsed, a Configure Response is sent,
 * and — once both directions are configured — the channel goes to
 * BT_CONNECTED (initializing ERTM state when that mode was negotiated).
 */
2724 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2726 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2727 u16 dcid, flags;
2728 u8 rsp[64];
2729 struct sock *sk;
2730 int len;
2732 dcid = __le16_to_cpu(req->dcid);
2733 flags = __le16_to_cpu(req->flags);
2735 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2737 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2738 if (!sk)
2739 return -ENOENT;
/* A channel already being torn down ignores further configuration. */
2741 if (sk->sk_state == BT_DISCONN)
2742 goto unlock;
2744 /* Reject if config buffer is too small. */
2745 len = cmd_len - sizeof(*req);
2746 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2747 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2748 l2cap_build_conf_rsp(sk, rsp,
2749 L2CAP_CONF_REJECT, flags), rsp);
2750 goto unlock;
2753 /* Store config. */
2754 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2755 l2cap_pi(sk)->conf_len += len;
/* 0x0001 = continuation flag: more option fragments will follow. */
2757 if (flags & 0x0001) {
2758 /* Incomplete config. Send empty response. */
2759 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2760 l2cap_build_conf_rsp(sk, rsp,
2761 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2762 goto unlock;
2765 /* Complete config. */
2766 len = l2cap_parse_conf_req(sk, rsp);
2767 if (len < 0) {
/* Unparseable/unacceptable options: give up on the channel. */
2768 l2cap_send_disconn_req(conn, sk);
2769 goto unlock;
2772 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2773 l2cap_pi(sk)->num_conf_rsp++;
2775 /* Reset config buffer. */
2776 l2cap_pi(sk)->conf_len = 0;
2778 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2779 goto unlock;
/* Both directions configured: channel becomes operational. */
2781 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Default to CRC16 FCS unless both sides agreed to disable it. */
2782 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2783 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2784 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2786 sk->sk_state = BT_CONNECTED;
2788 l2cap_pi(sk)->next_tx_seq = 0;
2789 l2cap_pi(sk)->expected_tx_seq = 0;
2790 __skb_queue_head_init(TX_QUEUE(sk));
2791 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2792 l2cap_ertm_init(sk);
2794 l2cap_chan_ready(sk);
2795 goto unlock;
/* Peer configured first: send our own Configure Request now. */
2798 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2799 u8 buf[64];
2800 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2801 l2cap_build_conf_req(sk, buf), buf);
2802 l2cap_pi(sk)->num_conf_req++;
2805 unlock:
2806 bh_unlock_sock(sk);
2807 return 0;
/* Handle an incoming L2CAP Configure Response.
 *
 * On UNACCEPT, the peer's counter-proposal is parsed and a fresh
 * Configure Request is sent (bounded by L2CAP_CONF_MAX_CONF_RSP rounds).
 * Any other failure disconnects the channel.  When our outgoing
 * configuration is finally accepted (INPUT_DONE) and the peer's side is
 * also done (OUTPUT_DONE), the channel goes to BT_CONNECTED.
 */
2810 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2812 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2813 u16 scid, flags, result;
2814 struct sock *sk;
2816 scid = __le16_to_cpu(rsp->scid);
2817 flags = __le16_to_cpu(rsp->flags);
2818 result = __le16_to_cpu(rsp->result);
2820 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2821 scid, flags, result);
2823 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2824 if (!sk)
2825 return 0;
2827 switch (result) {
2828 case L2CAP_CONF_SUCCESS:
2829 break;
2831 case L2CAP_CONF_UNACCEPT:
/* Renegotiate, but only a bounded number of times. */
2832 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* NOTE(review): cmd->len is read without le16_to_cpu here, unlike
 * the callers' cmd_len handling — verify endianness expectations. */
2833 int len = cmd->len - sizeof(*rsp);
2834 char req[64];
2836 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2837 l2cap_send_disconn_req(conn, sk);
2838 goto done;
2841 /* throw out any old stored conf requests */
2842 result = L2CAP_CONF_SUCCESS;
2843 len = l2cap_parse_conf_rsp(sk, rsp->data,
2844 len, req, &result);
2845 if (len < 0) {
2846 l2cap_send_disconn_req(conn, sk);
2847 goto done;
2850 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2851 L2CAP_CONF_REQ, len, req);
2852 l2cap_pi(sk)->num_conf_req++;
2853 if (result != L2CAP_CONF_SUCCESS)
2854 goto done;
2855 break;
/* fallthrough into default when renegotiation limit is exceeded */
2858 default:
2859 sk->sk_state = BT_DISCONN;
2860 sk->sk_err = ECONNRESET;
2861 l2cap_sock_set_timer(sk, HZ * 5);
2862 l2cap_send_disconn_req(conn, sk);
2863 goto done;
/* Continuation flag set: more response fragments to come. */
2866 if (flags & 0x01)
2867 goto done;
2869 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2871 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* Default to CRC16 FCS unless both sides agreed to disable it. */
2872 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2873 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2874 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2876 sk->sk_state = BT_CONNECTED;
2877 l2cap_pi(sk)->next_tx_seq = 0;
2878 l2cap_pi(sk)->expected_tx_seq = 0;
2879 __skb_queue_head_init(TX_QUEUE(sk));
2880 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2881 l2cap_ertm_init(sk);
2883 l2cap_chan_ready(sk);
2886 done:
2887 bh_unlock_sock(sk);
2888 return 0;
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Acknowledges with a Disconnect Response, flushes the transmit side
 * (including ERTM retransmission state and timers), removes the channel,
 * and finally kills the zapped socket.
 */
2891 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2893 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2894 struct l2cap_disconn_rsp rsp;
2895 u16 dcid, scid;
2896 struct sock *sk;
2898 scid = __le16_to_cpu(req->scid);
2899 dcid = __le16_to_cpu(req->dcid);
2901 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid; that's the key for the lookup. */
2903 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2904 if (!sk)
2905 return 0;
2907 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2908 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2909 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2911 sk->sk_shutdown = SHUTDOWN_MASK;
2913 skb_queue_purge(TX_QUEUE(sk));
/* ERTM keeps extra per-channel state: SREJ queue and two timers. */
2915 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2916 skb_queue_purge(SREJ_QUEUE(sk));
2917 del_timer(&l2cap_pi(sk)->retrans_timer);
2918 del_timer(&l2cap_pi(sk)->monitor_timer);
2921 l2cap_chan_del(sk, ECONNRESET);
2922 bh_unlock_sock(sk);
2924 l2cap_sock_kill(sk);
2925 return 0;
/* Handle an incoming L2CAP Disconnect Response (peer confirmed the
 * disconnect we requested).  Mirrors l2cap_disconnect_req()'s cleanup,
 * but deletes the channel with error 0 since this side initiated it.
 */
2928 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2930 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2931 u16 dcid, scid;
2932 struct sock *sk;
2934 scid = __le16_to_cpu(rsp->scid);
2935 dcid = __le16_to_cpu(rsp->dcid);
2937 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2939 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2940 if (!sk)
2941 return 0;
2943 skb_queue_purge(TX_QUEUE(sk));
/* Tear down ERTM retransmission machinery as well. */
2945 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2946 skb_queue_purge(SREJ_QUEUE(sk));
2947 del_timer(&l2cap_pi(sk)->retrans_timer);
2948 del_timer(&l2cap_pi(sk)->monitor_timer);
2951 l2cap_chan_del(sk, 0);
2952 bh_unlock_sock(sk);
2954 l2cap_sock_kill(sk);
2955 return 0;
/* Handle an incoming L2CAP Information Request.
 *
 * Supports the two standard queries: the extended feature mask (ERTM,
 * streaming and FCS bits are advertised only when enable_ertm is set)
 * and the fixed-channel bitmap.  Anything else gets IR_NOTSUPP.
 */
2958 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2960 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2961 u16 type;
2963 type = __le16_to_cpu(req->type);
2965 BT_DBG("type 0x%4.4x", type);
2967 if (type == L2CAP_IT_FEAT_MASK) {
/* 8 bytes: 4-byte info_rsp header + 4-byte feature mask. */
2968 u8 buf[8];
2969 u32 feat_mask = l2cap_feat_mask;
2970 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2971 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2972 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2973 if (enable_ertm)
2974 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2975 | L2CAP_FEAT_FCS;
2976 put_unaligned_le32(feat_mask, rsp->data);
2977 l2cap_send_cmd(conn, cmd->ident,
2978 L2CAP_INFO_RSP, sizeof(buf), buf);
2979 } else if (type == L2CAP_IT_FIXED_CHAN) {
/* 12 bytes: 4-byte header + 8-byte fixed channel bitmap. */
2980 u8 buf[12];
2981 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2982 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2983 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2984 memcpy(buf + 4, l2cap_fixed_chan, 8);
2985 l2cap_send_cmd(conn, cmd->ident,
2986 L2CAP_INFO_RSP, sizeof(buf), buf);
2987 } else {
2988 struct l2cap_info_rsp rsp;
2989 rsp.type = cpu_to_le16(type);
2990 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2991 l2cap_send_cmd(conn, cmd->ident,
2992 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2995 return 0;
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the information exchange started at connection setup: after
 * the feature-mask answer, a fixed-channel query may follow; once the
 * exchange is done the INFO_FEAT_MASK_REQ_DONE flag is set and pending
 * channel connects are started via l2cap_conn_start().
 */
2998 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3000 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3001 u16 type, result;
3003 type = __le16_to_cpu(rsp->type);
3004 result = __le16_to_cpu(rsp->result);
3006 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Response arrived in time: cancel the info request timeout. */
3008 del_timer(&conn->info_timer);
3010 if (type == L2CAP_IT_FEAT_MASK) {
3011 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, query those too before
 * declaring the exchange finished. */
3013 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3014 struct l2cap_info_req req;
3015 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3017 conn->info_ident = l2cap_get_ident(conn);
3019 l2cap_send_cmd(conn, conn->info_ident,
3020 L2CAP_INFO_REQ, sizeof(req), &req);
3021 } else {
3022 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3023 conn->info_ident = 0;
3025 l2cap_conn_start(conn);
3027 } else if (type == L2CAP_IT_FIXED_CHAN) {
3028 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3029 conn->info_ident = 0;
3031 l2cap_conn_start(conn);
3034 return 0;
/* Parse and dispatch all signalling commands carried in one C-frame.
 *
 * A single signalling packet may contain several commands back to back;
 * each is validated (length within the remaining payload, non-zero ident)
 * and dispatched by opcode.  A handler error is answered with a Command
 * Reject.  Consumes the skb.
 */
3037 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3039 u8 *data = skb->data;
3040 int len = skb->len;
3041 struct l2cap_cmd_hdr cmd;
3042 int err = 0;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
3044 l2cap_raw_recv(conn, skb);
3046 while (len >= L2CAP_CMD_HDR_SIZE) {
3047 u16 cmd_len;
3048 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3049 data += L2CAP_CMD_HDR_SIZE;
3050 len -= L2CAP_CMD_HDR_SIZE;
3052 cmd_len = le16_to_cpu(cmd.len);
3054 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Guard against truncated/malformed commands before dispatching. */
3056 if (cmd_len > len || !cmd.ident) {
3057 BT_DBG("corrupted command");
3058 break;
3061 switch (cmd.code) {
3062 case L2CAP_COMMAND_REJ:
3063 l2cap_command_rej(conn, &cmd, data);
3064 break;
3066 case L2CAP_CONN_REQ:
3067 err = l2cap_connect_req(conn, &cmd, data);
3068 break;
3070 case L2CAP_CONN_RSP:
3071 err = l2cap_connect_rsp(conn, &cmd, data);
3072 break;
3074 case L2CAP_CONF_REQ:
3075 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3076 break;
3078 case L2CAP_CONF_RSP:
3079 err = l2cap_config_rsp(conn, &cmd, data);
3080 break;
3082 case L2CAP_DISCONN_REQ:
3083 err = l2cap_disconnect_req(conn, &cmd, data);
3084 break;
3086 case L2CAP_DISCONN_RSP:
3087 err = l2cap_disconnect_rsp(conn, &cmd, data);
3088 break;
3090 case L2CAP_ECHO_REQ:
/* Echo the received payload straight back. */
3091 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3092 break;
3094 case L2CAP_ECHO_RSP:
3095 break;
3097 case L2CAP_INFO_REQ:
3098 err = l2cap_information_req(conn, &cmd, data);
3099 break;
3101 case L2CAP_INFO_RSP:
3102 err = l2cap_information_rsp(conn, &cmd, data);
3103 break;
3105 default:
3106 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3107 err = -EINVAL;
3108 break;
3111 if (err) {
3112 struct l2cap_cmd_rej rej;
3113 BT_DBG("error %d", err);
3115 /* FIXME: Map err to a valid reason */
3116 rej.reason = cpu_to_le16(0);
3117 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Advance past this command's payload to the next one. */
3120 data += cmd_len;
3121 len -= cmd_len;
3124 kfree_skb(skb);
3127 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3129 u16 our_fcs, rcv_fcs;
3130 int hdr_size = L2CAP_HDR_SIZE + 2;
3132 if (pi->fcs == L2CAP_FCS_CRC16) {
3133 skb_trim(skb, skb->len - 2);
3134 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3135 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3137 if (our_fcs != rcv_fcs)
3138 return -EINVAL;
3140 return 0;
3143 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3145 struct sk_buff *next_skb;
3147 bt_cb(skb)->tx_seq = tx_seq;
3148 bt_cb(skb)->sar = sar;
3150 next_skb = skb_peek(SREJ_QUEUE(sk));
3151 if (!next_skb) {
3152 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3153 return;
3156 do {
3157 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3158 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3159 return;
3162 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3163 break;
3165 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3167 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3170 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3172 struct l2cap_pinfo *pi = l2cap_pi(sk);
3173 struct sk_buff *_skb;
3174 int err = -EINVAL;
3176 switch (control & L2CAP_CTRL_SAR) {
3177 case L2CAP_SDU_UNSEGMENTED:
3178 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3179 kfree_skb(pi->sdu);
3180 break;
3183 err = sock_queue_rcv_skb(sk, skb);
3184 if (!err)
3185 return 0;
3187 break;
3189 case L2CAP_SDU_START:
3190 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3191 kfree_skb(pi->sdu);
3192 break;
3195 pi->sdu_len = get_unaligned_le16(skb->data);
3196 skb_pull(skb, 2);
3198 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3199 if (!pi->sdu) {
3200 err = -ENOMEM;
3201 break;
3204 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3206 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3207 pi->partial_sdu_len = skb->len;
3208 err = 0;
3209 break;
3211 case L2CAP_SDU_CONTINUE:
3212 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3213 break;
3215 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3217 pi->partial_sdu_len += skb->len;
3218 if (pi->partial_sdu_len > pi->sdu_len)
3219 kfree_skb(pi->sdu);
3220 else
3221 err = 0;
3223 break;
3225 case L2CAP_SDU_END:
3226 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3227 break;
3229 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3231 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3232 pi->partial_sdu_len += skb->len;
3234 if (pi->partial_sdu_len == pi->sdu_len) {
3235 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3236 err = sock_queue_rcv_skb(sk, _skb);
3237 if (err < 0)
3238 kfree_skb(_skb);
3240 kfree_skb(pi->sdu);
3241 err = 0;
3243 break;
3246 kfree_skb(skb);
3247 return err;
3250 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3252 struct sk_buff *skb;
3253 u16 control = 0;
3255 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3256 if (bt_cb(skb)->tx_seq != tx_seq)
3257 break;
3259 skb = skb_dequeue(SREJ_QUEUE(sk));
3260 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3261 l2cap_sar_reassembly_sdu(sk, skb, control);
3262 l2cap_pi(sk)->buffer_seq_srej =
3263 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3264 tx_seq++;
3268 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3270 struct l2cap_pinfo *pi = l2cap_pi(sk);
3271 struct srej_list *l, *tmp;
3272 u16 control;
3274 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3275 if (l->tx_seq == tx_seq) {
3276 list_del(&l->list);
3277 kfree(l);
3278 return;
3280 control = L2CAP_SUPER_SELECT_REJECT;
3281 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3282 l2cap_send_sframe(pi, control);
3283 list_del(&l->list);
3284 list_add_tail(&l->list, SREJ_LIST(sk));
3288 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3290 struct l2cap_pinfo *pi = l2cap_pi(sk);
3291 struct srej_list *new;
3292 u16 control;
3294 while (tx_seq != pi->expected_tx_seq) {
3295 control = L2CAP_SUPER_SELECT_REJECT;
3296 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3297 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3298 control |= L2CAP_CTRL_POLL;
3299 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3301 l2cap_send_sframe(pi, control);
3303 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3304 new->tx_seq = pi->expected_tx_seq++;
3305 list_add_tail(&new->list, SREJ_LIST(sk));
3307 pi->expected_tx_seq++;
/* ERTM receive path for I-frames (numbered information frames).
 *
 * Acknowledged frames are dropped from the TX queue, in-sequence frames
 * are delivered (with periodic RR acknowledgements), and out-of-sequence
 * frames start or extend SREJ-based selective retransmission recovery.
 * Sequence numbers live in a modulo-64 space.
 */
3310 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3312 struct l2cap_pinfo *pi = l2cap_pi(sk);
3313 u8 tx_seq = __get_txseq(rx_control);
3314 u8 req_seq = __get_reqseq(rx_control);
3315 u16 tx_control = 0;
3316 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3317 int err = 0;
3319 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* The piggybacked ReqSeq acknowledges our frames up to req_seq. */
3321 pi->expected_ack_seq = req_seq;
3322 l2cap_drop_acked_frames(sk);
3324 if (tx_seq == pi->expected_tx_seq)
3325 goto expected;
/* Out of sequence while SREJ recovery is already running. */
3327 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3328 struct srej_list *first;
3330 first = list_first_entry(SREJ_LIST(sk),
3331 struct srej_list, list);
3332 if (tx_seq == first->tx_seq) {
/* The oldest requested frame arrived: buffer it and see how far
 * the contiguous run in the SREJ queue now extends. */
3333 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3334 l2cap_check_srej_gap(sk, tx_seq);
3336 list_del(&first->list);
3337 kfree(first);
3339 if (list_empty(SREJ_LIST(sk))) {
3340 pi->buffer_seq = pi->buffer_seq_srej;
3341 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3343 } else {
3344 struct srej_list *l;
3345 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* A duplicate of an already-requested frame means our SREJ was
 * lost: re-send SREJs instead of requesting a new gap. */
3347 list_for_each_entry(l, SREJ_LIST(sk), list) {
3348 if (l->tx_seq == tx_seq) {
3349 l2cap_resend_srejframe(sk, tx_seq);
3350 return 0;
3353 l2cap_send_srejframe(sk, tx_seq);
3355 } else {
/* First out-of-sequence frame: enter SREJ recovery. */
3356 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3358 INIT_LIST_HEAD(SREJ_LIST(sk));
3359 pi->buffer_seq_srej = pi->buffer_seq;
3361 __skb_queue_head_init(SREJ_QUEUE(sk));
3362 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3364 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3366 l2cap_send_srejframe(sk, tx_seq);
3368 return 0;
3370 expected:
3371 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* During recovery even in-sequence frames go through the SREJ queue
 * so delivery order is preserved. */
3373 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3374 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3375 return 0;
/* F-bit set: answer to our poll; resume transmission unless a REJ
 * retransmission is already in flight. */
3378 if (rx_control & L2CAP_CTRL_FINAL) {
3379 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3380 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3381 else {
3382 sk->sk_send_head = TX_QUEUE(sk)->next;
3383 pi->next_tx_seq = pi->expected_ack_seq;
3384 l2cap_ertm_send(sk);
3388 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3390 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3391 if (err < 0)
3392 return err;
/* Send an RR acknowledgement every NUM_TO_ACK frames. */
3394 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3395 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3396 tx_control |= L2CAP_SUPER_RCV_READY;
3397 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3398 l2cap_send_sframe(pi, tx_control);
3400 return 0;
/* ERTM receive path for S-frames (supervisory frames: RR/REJ/SREJ/RNR).
 *
 * Updates acknowledgement state, drives retransmission for REJ/SREJ,
 * answers polls (P-bit) with F-bit responses, and tracks the peer's
 * receiver-busy condition for RNR.
 */
3403 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3405 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* For S-frames the sequence field is the ReqSeq acknowledgement. */
3406 u8 tx_seq = __get_reqseq(rx_control);
3408 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3410 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3411 case L2CAP_SUPER_RCV_READY:
3412 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polls us: answer with an F-bit RR carrying our state. */
3413 u16 control = L2CAP_CTRL_FINAL;
3414 control |= L2CAP_SUPER_RCV_READY |
3415 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3416 l2cap_send_sframe(l2cap_pi(sk), control);
3417 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3419 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit answer to our poll: sync ack state and, unless a REJ
 * retransmission is active, restart sending from the ack point. */
3420 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3421 pi->expected_ack_seq = tx_seq;
3422 l2cap_drop_acked_frames(sk);
3424 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3425 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3426 else {
3427 sk->sk_send_head = TX_QUEUE(sk)->next;
3428 pi->next_tx_seq = pi->expected_ack_seq;
3429 l2cap_ertm_send(sk);
3432 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3433 break;
/* Poll answered: stop the monitor timer, rearm retransmission
 * if frames are still unacknowledged. */
3435 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3436 del_timer(&pi->monitor_timer);
3438 if (pi->unacked_frames > 0)
3439 __mod_retrans_timer();
3440 } else {
/* Plain RR acknowledgement. */
3441 pi->expected_ack_seq = tx_seq;
3442 l2cap_drop_acked_frames(sk);
3444 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3445 (pi->unacked_frames > 0))
3446 __mod_retrans_timer();
3448 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3449 l2cap_ertm_send(sk);
3451 break;
3453 case L2CAP_SUPER_REJECT:
/* Go-back-N: retransmit everything from the rejected frame. */
3454 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3456 pi->expected_ack_seq = __get_reqseq(rx_control);
3457 l2cap_drop_acked_frames(sk);
3459 if (rx_control & L2CAP_CTRL_FINAL) {
3460 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3461 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3462 else {
3463 sk->sk_send_head = TX_QUEUE(sk)->next;
3464 pi->next_tx_seq = pi->expected_ack_seq;
3465 l2cap_ertm_send(sk);
3467 } else {
3468 sk->sk_send_head = TX_QUEUE(sk)->next;
3469 pi->next_tx_seq = pi->expected_ack_seq;
3470 l2cap_ertm_send(sk);
3472 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3473 pi->srej_save_reqseq = tx_seq;
3474 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3478 break;
3480 case L2CAP_SUPER_SELECT_REJECT:
/* Selective retransmission of exactly the requested frame. */
3481 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3483 if (rx_control & L2CAP_CTRL_POLL) {
3484 pi->expected_ack_seq = tx_seq;
3485 l2cap_drop_acked_frames(sk);
3486 l2cap_retransmit_frame(sk, tx_seq);
3487 l2cap_ertm_send(sk);
3488 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3489 pi->srej_save_reqseq = tx_seq;
3490 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3492 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Skip the retransmit if this F-bit SREJ repeats the one we
 * already acted on while polling. */
3493 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3494 pi->srej_save_reqseq == tx_seq)
3495 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3496 else
3497 l2cap_retransmit_frame(sk, tx_seq);
3499 else {
3500 l2cap_retransmit_frame(sk, tx_seq);
3501 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3502 pi->srej_save_reqseq = tx_seq;
3503 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3506 break;
3508 case L2CAP_SUPER_RCV_NOT_READY:
/* Peer's receiver is busy: note it, stop retransmitting, and
 * answer a poll with an F-bit frame. */
3509 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3510 pi->expected_ack_seq = tx_seq;
3511 l2cap_drop_acked_frames(sk);
3513 del_timer(&l2cap_pi(sk)->retrans_timer);
3514 if (rx_control & L2CAP_CTRL_POLL) {
3515 u16 control = L2CAP_CTRL_FINAL;
3516 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
3518 break;
/* NOTE(review): the S-frame skb does not appear to be freed on this
 * path — confirm the caller owns/frees it. */
3521 return 0;
/* Deliver an incoming data frame to the channel identified by 'cid',
 * dispatching per channel mode:
 *  - BASIC:      queue straight to the socket (MTU-checked);
 *  - ERTM:       strip control field, verify length and FCS, then route
 *                to the I-frame or S-frame handler;
 *  - STREAMING:  like ERTM but without retransmission — sequence gaps
 *                are simply skipped over.
 * Consumes the skb on the drop paths; returns 0 always.
 */
3524 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3526 struct sock *sk;
3527 struct l2cap_pinfo *pi;
3528 u16 control, len;
3529 u8 tx_seq;
3531 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3532 if (!sk) {
3533 BT_DBG("unknown cid 0x%4.4x", cid);
3534 goto drop;
3537 pi = l2cap_pi(sk);
3539 BT_DBG("sk %p, len %d", sk, skb->len);
3541 if (sk->sk_state != BT_CONNECTED)
3542 goto drop;
3544 switch (pi->mode) {
3545 case L2CAP_MODE_BASIC:
3546 /* If socket recv buffers overflows we drop data here
3547 * which is *bad* because L2CAP has to be reliable.
3548 * But we don't have any other choice. L2CAP doesn't
3549 * provide flow control mechanism. */
3551 if (pi->imtu < skb->len)
3552 goto drop;
3554 if (!sock_queue_rcv_skb(sk, skb))
3555 goto done;
3556 break;
3558 case L2CAP_MODE_ERTM:
/* First two bytes are the ERTM control field. */
3559 control = get_unaligned_le16(skb->data);
3560 skb_pull(skb, 2);
3561 len = skb->len;
/* Payload length excludes the SAR length prefix and the FCS. */
3563 if (__is_sar_start(control))
3564 len -= 2;
3566 if (pi->fcs == L2CAP_FCS_CRC16)
3567 len -= 2;
3570 * We can just drop the corrupted I-frame here.
3571 * Receiver will miss it and start proper recovery
3572 * procedures and ask retransmission.
3574 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3575 goto drop;
3577 if (l2cap_check_fcs(pi, skb))
3578 goto drop;
3580 if (__is_iframe(control))
3581 l2cap_data_channel_iframe(sk, control, skb);
3582 else
3583 l2cap_data_channel_sframe(sk, control, skb);
3585 goto done;
3587 case L2CAP_MODE_STREAMING:
3588 control = get_unaligned_le16(skb->data);
3589 skb_pull(skb, 2);
3590 len = skb->len;
3592 if (__is_sar_start(control))
3593 len -= 2;
3595 if (pi->fcs == L2CAP_FCS_CRC16)
3596 len -= 2;
/* Streaming mode carries I-frames only; S-frames are invalid. */
3598 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3599 goto drop;
3601 if (l2cap_check_fcs(pi, skb))
3602 goto drop;
3604 tx_seq = __get_txseq(control);
/* No retransmission in streaming mode: just resync on gaps. */
3606 if (pi->expected_tx_seq == tx_seq)
3607 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3608 else
3609 pi->expected_tx_seq = tx_seq + 1;
3611 l2cap_sar_reassembly_sdu(sk, skb, control);
3613 goto done;
3615 default:
3616 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
3617 break;
3620 drop:
3621 kfree_skb(skb);
3623 done:
/* The lookup helper returned the socket bh-locked (when found). */
3624 if (sk)
3625 bh_unlock_sock(sk);
3627 return 0;
3630 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3632 struct sock *sk;
3634 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3635 if (!sk)
3636 goto drop;
3638 BT_DBG("sk %p, len %d", sk, skb->len);
3640 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3641 goto drop;
3643 if (l2cap_pi(sk)->imtu < skb->len)
3644 goto drop;
3646 if (!sock_queue_rcv_skb(sk, skb))
3647 goto done;
3649 drop:
3650 kfree_skb(skb);
3652 done:
3653 if (sk)
3654 bh_unlock_sock(sk);
3655 return 0;
3658 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3660 struct l2cap_hdr *lh = (void *) skb->data;
3661 u16 cid, len;
3662 __le16 psm;
3664 skb_pull(skb, L2CAP_HDR_SIZE);
3665 cid = __le16_to_cpu(lh->cid);
3666 len = __le16_to_cpu(lh->len);
3668 if (len != skb->len) {
3669 kfree_skb(skb);
3670 return;
3673 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3675 switch (cid) {
3676 case L2CAP_CID_SIGNALING:
3677 l2cap_sig_channel(conn, skb);
3678 break;
3680 case L2CAP_CID_CONN_LESS:
3681 psm = get_unaligned_le16(skb->data);
3682 skb_pull(skb, 2);
3683 l2cap_conless_channel(conn, psm, skb);
3684 break;
3686 default:
3687 l2cap_data_channel(conn, cid, skb);
3688 break;
3692 /* ---- L2CAP interface with lower layer (HCI) ---- */
3694 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3696 int exact = 0, lm1 = 0, lm2 = 0;
3697 register struct sock *sk;
3698 struct hlist_node *node;
3700 if (type != ACL_LINK)
3701 return 0;
3703 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3705 /* Find listening sockets and check their link_mode */
3706 read_lock(&l2cap_sk_list.lock);
3707 sk_for_each(sk, node, &l2cap_sk_list.head) {
3708 if (sk->sk_state != BT_LISTEN)
3709 continue;
3711 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3712 lm1 |= HCI_LM_ACCEPT;
3713 if (l2cap_pi(sk)->role_switch)
3714 lm1 |= HCI_LM_MASTER;
3715 exact++;
3716 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3717 lm2 |= HCI_LM_ACCEPT;
3718 if (l2cap_pi(sk)->role_switch)
3719 lm2 |= HCI_LM_MASTER;
3722 read_unlock(&l2cap_sk_list.lock);
3724 return exact ? lm1 : lm2;
3727 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3729 struct l2cap_conn *conn;
3731 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3733 if (hcon->type != ACL_LINK)
3734 return 0;
3736 if (!status) {
3737 conn = l2cap_conn_add(hcon, status);
3738 if (conn)
3739 l2cap_conn_ready(conn);
3740 } else
3741 l2cap_conn_del(hcon, bt_err(status));
3743 return 0;
3746 static int l2cap_disconn_ind(struct hci_conn *hcon)
3748 struct l2cap_conn *conn = hcon->l2cap_data;
3750 BT_DBG("hcon %p", hcon);
3752 if (hcon->type != ACL_LINK || !conn)
3753 return 0x13;
3755 return conn->disc_reason;
3758 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3760 BT_DBG("hcon %p reason %d", hcon, reason);
3762 if (hcon->type != ACL_LINK)
3763 return 0;
3765 l2cap_conn_del(hcon, bt_err(reason));
3767 return 0;
3770 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3772 if (sk->sk_type != SOCK_SEQPACKET)
3773 return;
3775 if (encrypt == 0x00) {
3776 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3777 l2cap_sock_clear_timer(sk);
3778 l2cap_sock_set_timer(sk, HZ * 5);
3779 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3780 __l2cap_sock_close(sk, ECONNREFUSED);
3781 } else {
3782 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3783 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption procedure finished.
 *
 * Fans the result out to every channel on the connection: established
 * channels re-check their encryption requirements; channels waiting in
 * BT_CONNECT send their (now-authorized) Connect Request or retry
 * shortly; channels in BT_CONNECT2 answer the peer's pending Connect
 * Request with success or a security block.
 */
3787 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3789 struct l2cap_chan_list *l;
3790 struct l2cap_conn *conn = hcon->l2cap_data;
3791 struct sock *sk;
3793 if (!conn)
3794 return 0;
3796 l = &conn->chan_list;
3798 BT_DBG("conn %p", conn);
3800 read_lock(&l->lock);
3802 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3803 bh_lock_sock(sk);
/* Channels still waiting on their own connect handshake are not
 * interested in this security result. */
3805 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3806 bh_unlock_sock(sk);
3807 continue;
3810 if (!status && (sk->sk_state == BT_CONNECTED ||
3811 sk->sk_state == BT_CONFIG)) {
3812 l2cap_check_encryption(sk, encrypt);
3813 bh_unlock_sock(sk);
3814 continue;
3817 if (sk->sk_state == BT_CONNECT) {
3818 if (!status) {
/* Security satisfied: fire off the deferred Connect Request. */
3819 struct l2cap_conn_req req;
3820 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3821 req.psm = l2cap_pi(sk)->psm;
3823 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3825 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3826 L2CAP_CONN_REQ, sizeof(req), &req);
3827 } else {
/* Security failed: short timer so the close runs soon. */
3828 l2cap_sock_clear_timer(sk);
3829 l2cap_sock_set_timer(sk, HZ / 10);
3831 } else if (sk->sk_state == BT_CONNECT2) {
/* We owe the peer a Connect Response that was gated on this
 * security procedure. */
3832 struct l2cap_conn_rsp rsp;
3833 __u16 result;
3835 if (!status) {
3836 sk->sk_state = BT_CONFIG;
3837 result = L2CAP_CR_SUCCESS;
3838 } else {
3839 sk->sk_state = BT_DISCONN;
3840 l2cap_sock_set_timer(sk, HZ / 10);
3841 result = L2CAP_CR_SEC_BLOCK;
3844 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3845 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3846 rsp.result = cpu_to_le16(result);
3847 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3848 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3849 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3852 bh_unlock_sock(sk);
3855 read_unlock(&l->lock);
3857 return 0;
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 *
 * ACL_START fragments carry the L2CAP header, from which the total frame
 * length is learned; a complete frame is dispatched immediately, a
 * partial one starts an rx_skb accumulation.  Continuation fragments are
 * appended until rx_len reaches zero.  Any inconsistency (unexpected
 * start/continuation, short or overlong data) marks the connection
 * unreliable and drops the fragment.
 */
3860 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3862 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily attach an L2CAP connection to the link if none exists. */
3864 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3865 goto drop;
3867 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3869 if (flags & ACL_START) {
3870 struct l2cap_hdr *hdr;
3871 int len;
/* A start fragment while a reassembly is pending means the
 * previous frame was truncated: discard it. */
3873 if (conn->rx_len) {
3874 BT_ERR("Unexpected start frame (len %d)", skb->len);
3875 kfree_skb(conn->rx_skb);
3876 conn->rx_skb = NULL;
3877 conn->rx_len = 0;
3878 l2cap_conn_unreliable(conn, ECOMM);
/* Need at least the length field of the L2CAP header. */
3881 if (skb->len < 2) {
3882 BT_ERR("Frame is too short (len %d)", skb->len);
3883 l2cap_conn_unreliable(conn, ECOMM);
3884 goto drop;
3887 hdr = (struct l2cap_hdr *) skb->data;
3888 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3890 if (len == skb->len) {
3891 /* Complete frame received */
3892 l2cap_recv_frame(conn, skb);
3893 return 0;
3896 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3898 if (skb->len > len) {
3899 BT_ERR("Frame is too long (len %d, expected len %d)",
3900 skb->len, len);
3901 l2cap_conn_unreliable(conn, ECOMM);
3902 goto drop;
3905 /* Allocate skb for the complete frame (with header) */
3906 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3907 if (!conn->rx_skb)
3908 goto drop;
3910 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3911 skb->len);
/* rx_len counts the bytes still missing. */
3912 conn->rx_len = len - skb->len;
3913 } else {
3914 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress. */
3916 if (!conn->rx_len) {
3917 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3918 l2cap_conn_unreliable(conn, ECOMM);
3919 goto drop;
3922 if (skb->len > conn->rx_len) {
3923 BT_ERR("Fragment is too long (len %d, expected %d)",
3924 skb->len, conn->rx_len);
3925 kfree_skb(conn->rx_skb);
3926 conn->rx_skb = NULL;
3927 conn->rx_len = 0;
3928 l2cap_conn_unreliable(conn, ECOMM);
3929 goto drop;
3932 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3933 skb->len);
3934 conn->rx_len -= skb->len;
3936 if (!conn->rx_len) {
3937 /* Complete frame received */
3938 l2cap_recv_frame(conn, conn->rx_skb);
3939 conn->rx_skb = NULL;
3943 drop:
3944 kfree_skb(skb);
3945 return 0;
/* debugfs: dump one line per L2CAP socket — source and destination
 * addresses, socket state, PSM, both CIDs, both MTUs and the security
 * level.  Iterates the global socket list under its read lock. */
3948 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3950 struct sock *sk;
3951 struct hlist_node *node;
3953 read_lock_bh(&l2cap_sk_list.lock);
3955 sk_for_each(sk, node, &l2cap_sk_list.head) {
3956 struct l2cap_pinfo *pi = l2cap_pi(sk);
3958 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3959 batostr(&bt_sk(sk)->src),
3960 batostr(&bt_sk(sk)->dst),
3961 sk->sk_state, __le16_to_cpu(pi->psm),
3962 pi->scid, pi->dcid,
3963 pi->imtu, pi->omtu, pi->sec_level);
3966 read_unlock_bh(&l2cap_sk_list.lock);
3968 return 0;
/* debugfs open hook: bind the seq_file single-show to l2cap_debugfs_show. */
3971 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3973 return single_open(file, l2cap_debugfs_show, inode->i_private);
/*
 * File operations for the read-only l2cap debugfs file; read/llseek/release
 * are the standard seq_file single_* helpers paired with single_open above.
 */
3976 static const struct file_operations l2cap_debugfs_fops = {
3977 .open = l2cap_debugfs_open,
3978 .read = seq_read,
3979 .llseek = seq_lseek,
3980 .release = single_release,
3983 static struct dentry *l2cap_debugfs; /* dentry of the debugfs file; NULL if creation failed or bt_debugfs is absent */
/*
 * Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * poll/ioctl reuse the generic Bluetooth helpers; mmap and socketpair are
 * unsupported and wired to the sock_no_* stubs; everything else is
 * implemented by the l2cap_sock_* functions earlier in this file.
 */
3985 static const struct proto_ops l2cap_sock_ops = {
3986 .family = PF_BLUETOOTH,
3987 .owner = THIS_MODULE,
3988 .release = l2cap_sock_release,
3989 .bind = l2cap_sock_bind,
3990 .connect = l2cap_sock_connect,
3991 .listen = l2cap_sock_listen,
3992 .accept = l2cap_sock_accept,
3993 .getname = l2cap_sock_getname,
3994 .sendmsg = l2cap_sock_sendmsg,
3995 .recvmsg = l2cap_sock_recvmsg,
3996 .poll = bt_sock_poll,
3997 .ioctl = bt_sock_ioctl,
3998 .mmap = sock_no_mmap,
3999 .socketpair = sock_no_socketpair,
4000 .shutdown = l2cap_sock_shutdown,
4001 .setsockopt = l2cap_sock_setsockopt,
4002 .getsockopt = l2cap_sock_getsockopt
/*
 * Family registration record: lets socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP)
 * reach l2cap_sock_create once registered via bt_sock_register() in
 * l2cap_init().
 */
4005 static const struct net_proto_family l2cap_sock_family_ops = {
4006 .family = PF_BLUETOOTH,
4007 .owner = THIS_MODULE,
4008 .create = l2cap_sock_create,
/*
 * HCI protocol hooks: registered with the HCI core so L2CAP is notified of
 * connection setup/teardown, security events, and receives incoming ACL
 * data (l2cap_recv_acldata handles fragment reassembly).
 */
4011 static struct hci_proto l2cap_hci_proto = {
4012 .name = "L2CAP",
4013 .id = HCI_PROTO_L2CAP,
4014 .connect_ind = l2cap_connect_ind,
4015 .connect_cfm = l2cap_connect_cfm,
4016 .disconn_ind = l2cap_disconn_ind,
4017 .disconn_cfm = l2cap_disconn_cfm,
4018 .security_cfm = l2cap_security_cfm,
4019 .recv_acldata = l2cap_recv_acldata
/*
 * l2cap_init - module initialization.
 *
 * Registers, in order: the l2cap_proto socket protocol, the PF_BLUETOOTH
 * socket family entry for BTPROTO_L2CAP, and the HCI protocol hooks.
 * Failure unwinds in reverse: the HCI failure path unregisters the socket
 * family explicitly before falling through to the shared error label,
 * which undoes the proto registration.  Also creates the (optional,
 * non-fatal on failure) debugfs file.  Returns 0 on success or a negative
 * errno.
 */
4022 static int __init l2cap_init(void)
4024 int err;
4026 err = proto_register(&l2cap_proto, 0);
4027 if (err < 0)
4028 return err;
4030 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4031 if (err < 0) {
4032 BT_ERR("L2CAP socket registration failed");
4033 goto error;
4036 err = hci_register_proto(&l2cap_hci_proto);
4037 if (err < 0) {
4038 BT_ERR("L2CAP protocol registration failed");
/* Undo the step above by hand; the error label only undoes proto_register. */
4039 bt_sock_unregister(BTPROTO_L2CAP);
4040 goto error;
/* debugfs is best-effort: only warn if the file cannot be created. */
4043 if (bt_debugfs) {
4044 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4045 bt_debugfs, NULL, &l2cap_debugfs_fops);
4046 if (!l2cap_debugfs)
4047 BT_ERR("Failed to create L2CAP debug file");
4050 BT_INFO("L2CAP ver %s", VERSION);
4051 BT_INFO("L2CAP socket layer initialized");
4053 return 0;
4055 error:
4056 proto_unregister(&l2cap_proto);
4057 return err;
/*
 * l2cap_exit - module teardown: remove the debugfs file, then unregister
 * the socket family, the HCI protocol hooks, and finally the socket
 * protocol.  Unregistration failures are logged but cannot be recovered
 * from at exit time.
 */
4060 static void __exit l2cap_exit(void)
/* debugfs_remove() tolerates a NULL dentry, so no guard is needed. */
4062 debugfs_remove(l2cap_debugfs);
4064 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4065 BT_ERR("L2CAP socket unregistration failed");
4067 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4068 BT_ERR("L2CAP protocol unregistration failed");
4070 proto_unregister(&l2cap_proto);
/*
 * l2cap_load - intentionally empty exported symbol.  Modules that only use
 * L2CAP sockets (and no other symbol from this module) call it purely to
 * create a symbol dependency that makes the module loader pull l2cap in
 * automatically.
 */
4073 void l2cap_load(void)
4075 /* Dummy function to trigger automatic L2CAP module loading by
4076 * other modules that use L2CAP sockets but don't use any other
4077 * symbols from it. */
4078 return;
4080 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit points. */
4082 module_init(l2cap_init);
4083 module_exit(l2cap_exit);
/* Runtime-tunable parameters (0644: root-writable via /sys/module). */
4085 module_param(enable_ertm, bool, 0644);
4086 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4088 module_param(max_transmit, uint, 0644);
4089 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
/* Module metadata; the bt-proto alias enables autoload by protocol number. */
4091 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4092 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4093 MODULE_VERSION(VERSION);
4094 MODULE_LICENSE("GPL");
4095 MODULE_ALIAS("bt-proto-0");