/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core and sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#define VERSION "2.14"
static int enable_ertm = 0;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
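
/* enable_ertm gates the Enhanced Retransmission and Streaming modes: the
 * connect/listen paths below only accept L2CAP_MODE_ERTM/STREAMING when it
 * is set, so with the default of 0 only Basic mode channels are created.
 */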
/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}

static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}

static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
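
/* Each l2cap_conn keeps its channels on a doubly linked list threaded
 * through l2cap_pinfo (next_c/prev_c) and protected by chan_list.lock.
 * The helpers below walk that list to look channels up by DCID, SCID or
 * signalling command ident.
 */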
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;

	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->dcid == cid)
			break;
	}
	return s;
}

static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;

	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->scid == cid)
			break;
	}
	return s;
}

/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;

	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}

static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;

	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->ident == ident)
			break;
	}
	return s;
}

static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;

	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}

static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
{
	u16 cid = L2CAP_CID_DYN_START;

	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(l, cid))
			return cid;
	}

	return 0;
}
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}

static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}

static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
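
/* The service level check below maps the socket's sec_level onto an HCI
 * authentication requirement: SDP (PSM 0x0001) only ever asks for the
 * "no bonding" variants, every other PSM asks for general bonding, with
 * the MITM variants used for BT_SECURITY_HIGH.
 */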
/* Service level security */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *    129 - 199 are reserved.
	 *    200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}

static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return -ENOMEM;

	return hci_send_acl(conn->hcon, skb, 0);
}
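
/* S-frames (supervisory frames) carry no payload: a basic L2CAP header,
 * a 16-bit control word with L2CAP_CTRL_FRAME_TYPE set, and an optional
 * CRC-16 FCS. That is why hlen below starts at L2CAP_HDR_SIZE + 2 and
 * grows by another 2 bytes when the channel uses L2CAP_FCS_CRC16.
 */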
static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	return hci_send_acl(pi->conn->hcon, skb, 0);
}

static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
		control |= L2CAP_SUPER_RCV_NOT_READY;
	else
		control |= L2CAP_SUPER_RCV_READY;

	return l2cap_send_sframe(pi, control);
}
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}

static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_disconn_req req;

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);
}
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}

/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}

static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	conn->disc_reason = 0x13;

	return conn;
}

static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}

static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
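
/* The socket interface below implements the AF_BLUETOOTH/BTPROTO_L2CAP
 * socket API. A minimal userspace sketch (not part of this file, shown
 * only for illustration; it assumes the sockaddr_l2 layout from the
 * userspace bluetooth headers, and remote_bdaddr is a placeholder for
 * the peer address):
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int s = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);
 *	bacpy(&addr.l2_bdaddr, &remote_bdaddr);
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 */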
/* ---- Socket interface ---- */
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
{
	struct sock *sk;
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
			goto found;
	sk = NULL;
found:
	return sk;
}

/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	return node ? sk : sk1;
}

/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;

	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}

/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs  = l2cap_pi(parent)->fcs;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->mode = L2CAP_MODE_BASIC;
		pi->fcs  = L2CAP_FCS_CRC16;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}

static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}

static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
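
/* PSM values below 0x1001 are reserved for well-known services, hence the
 * CAP_NET_BIND_SERVICE check above; binding to 0x0001 (SDP) or 0x0003
 * (RFCOMM) drops the channel to BT_SECURITY_SDP so those discovery
 * services stay reachable without authentication.
 */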
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

	if (peer) {
		la->l2_psm = l2cap_pi(sk)->psm;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	} else {
		la->l2_psm = l2cap_pi(sk)->sport;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
	}

	return 0;
}
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;
	u16 control;

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	control = L2CAP_CTRL_POLL;
	l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
	bh_unlock_sock(sk);
}

static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;
	u16 control;

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	control = L2CAP_CTRL_POLL;
	l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
	bh_unlock_sock(sk);
}
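
/* ERTM recovery path: the retransmission timer arms the monitor timer and
 * polls the peer with an RR/RNR carrying the P bit (L2CAP_CTRL_POLL); the
 * monitor timer keeps re-polling until retry_count reaches remote_max_tx,
 * at which point the channel is torn down with a Disconnection Request.
 */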
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk)))) {
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);

	return;
}

static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int err;

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	err = hci_send_acl(pi->conn->hcon, skb, 0);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
1272 static int l2cap_streaming_send(struct sock
*sk
)
1274 struct sk_buff
*skb
, *tx_skb
;
1275 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1279 while ((skb
= sk
->sk_send_head
)) {
1280 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1282 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1283 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1284 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1286 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1287 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1288 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1291 err
= l2cap_do_send(sk
, tx_skb
);
1293 l2cap_send_disconn_req(pi
->conn
, sk
);
1297 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1299 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1300 sk
->sk_send_head
= NULL
;
1302 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1304 skb
= skb_dequeue(TX_QUEUE(sk
));
1310 static int l2cap_retransmit_frame(struct sock
*sk
, u8 tx_seq
)
1312 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1313 struct sk_buff
*skb
, *tx_skb
;
1317 skb
= skb_peek(TX_QUEUE(sk
));
1319 if (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1320 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1322 skb
= skb_queue_next(TX_QUEUE(sk
), skb
);
1326 if (pi
->remote_max_tx
&&
1327 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1328 l2cap_send_disconn_req(pi
->conn
, sk
);
1332 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1333 bt_cb(skb
)->retries
++;
1334 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1335 control
|= (pi
->req_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1336 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1337 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1339 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1340 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1341 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1344 err
= l2cap_do_send(sk
, tx_skb
);
1346 l2cap_send_disconn_req(pi
->conn
, sk
);
1354 static int l2cap_ertm_send(struct sock
*sk
)
1356 struct sk_buff
*skb
, *tx_skb
;
1357 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1361 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
1364 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))
1365 && !(pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)) {
1366 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1368 if (pi
->remote_max_tx
&&
1369 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1370 l2cap_send_disconn_req(pi
->conn
, sk
);
1374 bt_cb(skb
)->retries
++;
1376 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1377 control
|= (pi
->req_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1378 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1379 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1382 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
) {
1383 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1384 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1387 err
= l2cap_do_send(sk
, tx_skb
);
1389 l2cap_send_disconn_req(pi
->conn
, sk
);
1392 __mod_retrans_timer();
1394 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1395 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1397 pi
->unacked_frames
++;
1399 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1400 sk
->sk_send_head
= NULL
;
1402 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1408 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1410 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1411 struct sk_buff
**frag
;
1414 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
)) {
1421 /* Continuation fragments (no L2CAP header) */
1422 frag
= &skb_shinfo(skb
)->frag_list
;
1424 count
= min_t(unsigned int, conn
->mtu
, len
);
1426 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1429 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1435 frag
= &(*frag
)->next
;
1441 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1443 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1444 struct sk_buff
*skb
;
1445 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1446 struct l2cap_hdr
*lh
;
1448 BT_DBG("sk %p len %d", sk
, (int)len
);
1450 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1451 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1452 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1454 return ERR_PTR(-ENOMEM
);
1456 /* Create L2CAP header */
1457 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1458 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1459 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1460 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1462 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1463 if (unlikely(err
< 0)) {
1465 return ERR_PTR(err
);
1470 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1472 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1473 struct sk_buff
*skb
;
1474 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1475 struct l2cap_hdr
*lh
;
1477 BT_DBG("sk %p len %d", sk
, (int)len
);
1479 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1480 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1481 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1483 return ERR_PTR(-ENOMEM
);
1485 /* Create L2CAP header */
1486 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1487 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1488 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1490 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1491 if (unlikely(err
< 0)) {
1493 return ERR_PTR(err
);
1498 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1500 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1501 struct sk_buff
*skb
;
1502 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1503 struct l2cap_hdr
*lh
;
1505 BT_DBG("sk %p len %d", sk
, (int)len
);
1510 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1513 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1514 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1515 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1517 return ERR_PTR(-ENOMEM
);
1519 /* Create L2CAP header */
1520 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1521 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1522 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1523 put_unaligned_le16(control
, skb_put(skb
, 2));
1525 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1527 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1528 if (unlikely(err
< 0)) {
1530 return ERR_PTR(err
);
1533 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1534 put_unaligned_le16(0, skb_put(skb
, 2));
1536 bt_cb(skb
)->retries
= 0;
1540 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1542 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1543 struct sk_buff
*skb
;
1544 struct sk_buff_head sar_queue
;
1548 __skb_queue_head_init(&sar_queue
);
1549 control
= L2CAP_SDU_START
;
1550 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->max_pdu_size
, control
, len
);
1552 return PTR_ERR(skb
);
1554 __skb_queue_tail(&sar_queue
, skb
);
1555 len
-= pi
->max_pdu_size
;
1556 size
+=pi
->max_pdu_size
;
1562 if (len
> pi
->max_pdu_size
) {
1563 control
|= L2CAP_SDU_CONTINUE
;
1564 buflen
= pi
->max_pdu_size
;
1566 control
|= L2CAP_SDU_END
;
1570 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1572 skb_queue_purge(&sar_queue
);
1573 return PTR_ERR(skb
);
1576 __skb_queue_tail(&sar_queue
, skb
);
1581 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1582 if (sk
->sk_send_head
== NULL
)
1583 sk
->sk_send_head
= sar_queue
.next
;
1588 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1590 struct sock
*sk
= sock
->sk
;
1591 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1592 struct sk_buff
*skb
;
1596 BT_DBG("sock %p, sk %p", sock
, sk
);
1598 err
= sock_error(sk
);
1602 if (msg
->msg_flags
& MSG_OOB
)
1605 /* Check outgoing MTU */
1606 if (sk
->sk_type
== SOCK_SEQPACKET
&& pi
->mode
== L2CAP_MODE_BASIC
1612 if (sk
->sk_state
!= BT_CONNECTED
) {
1617 /* Connectionless channel */
1618 if (sk
->sk_type
== SOCK_DGRAM
) {
1619 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1620 err
= l2cap_do_send(sk
, skb
);
1625 case L2CAP_MODE_BASIC
:
1626 /* Create a basic PDU */
1627 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1633 err
= l2cap_do_send(sk
, skb
);
1638 case L2CAP_MODE_ERTM
:
1639 case L2CAP_MODE_STREAMING
:
1640 /* Entire SDU fits into one PDU */
1641 if (len
<= pi
->max_pdu_size
) {
1642 control
= L2CAP_SDU_UNSEGMENTED
;
1643 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1648 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1649 if (sk
->sk_send_head
== NULL
)
1650 sk
->sk_send_head
= skb
;
1652 /* Segment SDU into multiples PDUs */
1653 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1658 if (pi
->mode
== L2CAP_MODE_STREAMING
)
1659 err
= l2cap_streaming_send(sk
);
1661 err
= l2cap_ertm_send(sk
);
1668 BT_DBG("bad state %1.1x", pi
->mode
);
1677 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1679 struct sock
*sk
= sock
->sk
;
1683 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1684 struct l2cap_conn_rsp rsp
;
1686 sk
->sk_state
= BT_CONFIG
;
1688 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1689 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1690 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1691 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1692 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1693 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1701 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1704 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1706 struct sock
*sk
= sock
->sk
;
1707 struct l2cap_options opts
;
1711 BT_DBG("sk %p", sk
);
1717 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1718 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1719 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1720 opts
.mode
= l2cap_pi(sk
)->mode
;
1721 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1723 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1724 if (copy_from_user((char *) &opts
, optval
, len
)) {
1729 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1730 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1731 l2cap_pi(sk
)->mode
= opts
.mode
;
1732 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1736 if (get_user(opt
, (u32 __user
*) optval
)) {
1741 if (opt
& L2CAP_LM_AUTH
)
1742 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1743 if (opt
& L2CAP_LM_ENCRYPT
)
1744 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1745 if (opt
& L2CAP_LM_SECURE
)
1746 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1748 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1749 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1761 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1763 struct sock
*sk
= sock
->sk
;
1764 struct bt_security sec
;
1768 BT_DBG("sk %p", sk
);
1770 if (level
== SOL_L2CAP
)
1771 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1773 if (level
!= SOL_BLUETOOTH
)
1774 return -ENOPROTOOPT
;
1780 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1785 sec
.level
= BT_SECURITY_LOW
;
1787 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1788 if (copy_from_user((char *) &sec
, optval
, len
)) {
1793 if (sec
.level
< BT_SECURITY_LOW
||
1794 sec
.level
> BT_SECURITY_HIGH
) {
1799 l2cap_pi(sk
)->sec_level
= sec
.level
;
1802 case BT_DEFER_SETUP
:
1803 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1808 if (get_user(opt
, (u32 __user
*) optval
)) {
1813 bt_sk(sk
)->defer_setup
= opt
;
1825 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
1827 struct sock
*sk
= sock
->sk
;
1828 struct l2cap_options opts
;
1829 struct l2cap_conninfo cinfo
;
1833 BT_DBG("sk %p", sk
);
1835 if (get_user(len
, optlen
))
1842 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1843 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1844 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1845 opts
.mode
= l2cap_pi(sk
)->mode
;
1846 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1848 len
= min_t(unsigned int, len
, sizeof(opts
));
1849 if (copy_to_user(optval
, (char *) &opts
, len
))
1855 switch (l2cap_pi(sk
)->sec_level
) {
1856 case BT_SECURITY_LOW
:
1857 opt
= L2CAP_LM_AUTH
;
1859 case BT_SECURITY_MEDIUM
:
1860 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
1862 case BT_SECURITY_HIGH
:
1863 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
1871 if (l2cap_pi(sk
)->role_switch
)
1872 opt
|= L2CAP_LM_MASTER
;
1874 if (l2cap_pi(sk
)->force_reliable
)
1875 opt
|= L2CAP_LM_RELIABLE
;
1877 if (put_user(opt
, (u32 __user
*) optval
))
1881 case L2CAP_CONNINFO
:
1882 if (sk
->sk_state
!= BT_CONNECTED
&&
1883 !(sk
->sk_state
== BT_CONNECT2
&&
1884 bt_sk(sk
)->defer_setup
)) {
1889 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
1890 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
1892 len
= min_t(unsigned int, len
, sizeof(cinfo
));
1893 if (copy_to_user(optval
, (char *) &cinfo
, len
))
1907 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1909 struct sock
*sk
= sock
->sk
;
1910 struct bt_security sec
;
1913 BT_DBG("sk %p", sk
);
1915 if (level
== SOL_L2CAP
)
1916 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
1918 if (level
!= SOL_BLUETOOTH
)
1919 return -ENOPROTOOPT
;
1921 if (get_user(len
, optlen
))
1928 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_RAW
) {
1933 sec
.level
= l2cap_pi(sk
)->sec_level
;
1935 len
= min_t(unsigned int, len
, sizeof(sec
));
1936 if (copy_to_user(optval
, (char *) &sec
, len
))
1941 case BT_DEFER_SETUP
:
1942 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1947 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
1961 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
1963 struct sock
*sk
= sock
->sk
;
1966 BT_DBG("sock %p, sk %p", sock
, sk
);
1972 if (!sk
->sk_shutdown
) {
1973 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1974 l2cap_sock_clear_timer(sk
);
1975 __l2cap_sock_close(sk
, 0);
1977 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
1978 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
1985 static int l2cap_sock_release(struct socket
*sock
)
1987 struct sock
*sk
= sock
->sk
;
1990 BT_DBG("sock %p, sk %p", sock
, sk
);
1995 err
= l2cap_sock_shutdown(sock
, 2);
1998 l2cap_sock_kill(sk
);
2002 static void l2cap_chan_ready(struct sock
*sk
)
2004 struct sock
*parent
= bt_sk(sk
)->parent
;
2006 BT_DBG("sk %p, parent %p", sk
, parent
);
2008 l2cap_pi(sk
)->conf_state
= 0;
2009 l2cap_sock_clear_timer(sk
);
2012 /* Outgoing channel.
2013 * Wake up socket sleeping on connect.
2015 sk
->sk_state
= BT_CONNECTED
;
2016 sk
->sk_state_change(sk
);
2018 /* Incoming channel.
2019 * Wake up socket sleeping on accept.
2021 parent
->sk_data_ready(parent
, 0);
2025 /* Copy frame to all raw sockets on that connection */
2026 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2028 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2029 struct sk_buff
*nskb
;
2032 BT_DBG("conn %p", conn
);
2034 read_lock(&l
->lock
);
2035 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2036 if (sk
->sk_type
!= SOCK_RAW
)
2039 /* Don't send frame to the socket it came from */
2042 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2046 if (sock_queue_rcv_skb(sk
, nskb
))
2049 read_unlock(&l
->lock
);
2052 /* ---- L2CAP signalling commands ---- */
2053 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2054 u8 code
, u8 ident
, u16 dlen
, void *data
)
2056 struct sk_buff
*skb
, **frag
;
2057 struct l2cap_cmd_hdr
*cmd
;
2058 struct l2cap_hdr
*lh
;
2061 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2062 conn
, code
, ident
, dlen
);
2064 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2065 count
= min_t(unsigned int, conn
->mtu
, len
);
2067 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2071 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2072 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2073 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2075 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2078 cmd
->len
= cpu_to_le16(dlen
);
2081 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2082 memcpy(skb_put(skb
, count
), data
, count
);
2088 /* Continuation fragments (no L2CAP header) */
2089 frag
= &skb_shinfo(skb
)->frag_list
;
2091 count
= min_t(unsigned int, conn
->mtu
, len
);
2093 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2097 memcpy(skb_put(*frag
, count
), data
, count
);
2102 frag
= &(*frag
)->next
;
2112 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2114 struct l2cap_conf_opt
*opt
= *ptr
;
2117 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2125 *val
= *((u8
*) opt
->val
);
2129 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2133 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2137 *val
= (unsigned long) opt
->val
;
2141 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2145 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2147 struct l2cap_conf_opt
*opt
= *ptr
;
2149 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2156 *((u8
*) opt
->val
) = val
;
2160 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2164 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2168 memcpy(opt
->val
, (void *) val
, len
);
2172 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2175 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2177 u32 local_feat_mask
= l2cap_feat_mask
;
2179 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2182 case L2CAP_MODE_ERTM
:
2183 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2184 case L2CAP_MODE_STREAMING
:
2185 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2191 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2194 case L2CAP_MODE_STREAMING
:
2195 case L2CAP_MODE_ERTM
:
2196 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2200 return L2CAP_MODE_BASIC
;
2204 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2206 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2207 struct l2cap_conf_req
*req
= data
;
2208 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2209 void *ptr
= req
->data
;
2211 BT_DBG("sk %p", sk
);
2213 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2217 case L2CAP_MODE_STREAMING
:
2218 case L2CAP_MODE_ERTM
:
2219 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2220 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2221 l2cap_send_disconn_req(pi
->conn
, sk
);
2224 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2230 case L2CAP_MODE_BASIC
:
2231 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2232 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2235 case L2CAP_MODE_ERTM
:
2236 rfc
.mode
= L2CAP_MODE_ERTM
;
2237 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2238 rfc
.max_transmit
= L2CAP_DEFAULT_MAX_TX
;
2239 rfc
.retrans_timeout
= 0;
2240 rfc
.monitor_timeout
= 0;
2241 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2243 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2244 sizeof(rfc
), (unsigned long) &rfc
);
2246 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2249 if (pi
->fcs
== L2CAP_FCS_NONE
||
2250 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2251 pi
->fcs
= L2CAP_FCS_NONE
;
2252 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2256 case L2CAP_MODE_STREAMING
:
2257 rfc
.mode
= L2CAP_MODE_STREAMING
;
2259 rfc
.max_transmit
= 0;
2260 rfc
.retrans_timeout
= 0;
2261 rfc
.monitor_timeout
= 0;
2262 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2264 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2265 sizeof(rfc
), (unsigned long) &rfc
);
2267 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2270 if (pi
->fcs
== L2CAP_FCS_NONE
||
2271 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2272 pi
->fcs
= L2CAP_FCS_NONE
;
2273 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2278 /* FIXME: Need actual value of the flush timeout */
2279 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2280 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2282 req
->dcid
= cpu_to_le16(pi
->dcid
);
2283 req
->flags
= cpu_to_le16(0);
2288 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2290 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2291 struct l2cap_conf_rsp
*rsp
= data
;
2292 void *ptr
= rsp
->data
;
2293 void *req
= pi
->conf_req
;
2294 int len
= pi
->conf_len
;
2295 int type
, hint
, olen
;
2297 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2298 u16 mtu
= L2CAP_DEFAULT_MTU
;
2299 u16 result
= L2CAP_CONF_SUCCESS
;
2301 BT_DBG("sk %p", sk
);
2303 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2304 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2306 hint
= type
& L2CAP_CONF_HINT
;
2307 type
&= L2CAP_CONF_MASK
;
2310 case L2CAP_CONF_MTU
:
2314 case L2CAP_CONF_FLUSH_TO
:
2318 case L2CAP_CONF_QOS
:
2321 case L2CAP_CONF_RFC
:
2322 if (olen
== sizeof(rfc
))
2323 memcpy(&rfc
, (void *) val
, olen
);
2326 case L2CAP_CONF_FCS
:
2327 if (val
== L2CAP_FCS_NONE
)
2328 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2336 result
= L2CAP_CONF_UNKNOWN
;
2337 *((u8
*) ptr
++) = type
;
2342 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2346 case L2CAP_MODE_STREAMING
:
2347 case L2CAP_MODE_ERTM
:
2348 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2349 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2350 return -ECONNREFUSED
;
2353 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2358 if (pi
->mode
!= rfc
.mode
) {
2359 result
= L2CAP_CONF_UNACCEPT
;
2360 rfc
.mode
= pi
->mode
;
2362 if (pi
->num_conf_rsp
== 1)
2363 return -ECONNREFUSED
;
2365 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2366 sizeof(rfc
), (unsigned long) &rfc
);
2370 if (result
== L2CAP_CONF_SUCCESS
) {
2371 /* Configure output options and let the other side know
2372 * which ones we don't like. */
2374 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2375 result
= L2CAP_CONF_UNACCEPT
;
2378 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2380 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2383 case L2CAP_MODE_BASIC
:
2384 pi
->fcs
= L2CAP_FCS_NONE
;
2385 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2388 case L2CAP_MODE_ERTM
:
2389 pi
->remote_tx_win
= rfc
.txwin_size
;
2390 pi
->remote_max_tx
= rfc
.max_transmit
;
2391 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2393 rfc
.retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
2394 rfc
.monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
2396 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2398 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2399 sizeof(rfc
), (unsigned long) &rfc
);
2403 case L2CAP_MODE_STREAMING
:
2404 pi
->remote_tx_win
= rfc
.txwin_size
;
2405 pi
->max_pdu_size
= rfc
.max_pdu_size
;
2407 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2409 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2410 sizeof(rfc
), (unsigned long) &rfc
);
2415 result
= L2CAP_CONF_UNACCEPT
;
2417 memset(&rfc
, 0, sizeof(rfc
));
2418 rfc
.mode
= pi
->mode
;
2421 if (result
== L2CAP_CONF_SUCCESS
)
2422 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2424 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2425 rsp
->result
= cpu_to_le16(result
);
2426 rsp
->flags
= cpu_to_le16(0x0000);
2431 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2433 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2434 struct l2cap_conf_req
*req
= data
;
2435 void *ptr
= req
->data
;
2438 struct l2cap_conf_rfc rfc
;
2440 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2442 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2443 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2446 case L2CAP_CONF_MTU
:
2447 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2448 *result
= L2CAP_CONF_UNACCEPT
;
2449 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2452 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2455 case L2CAP_CONF_FLUSH_TO
:
2457 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2461 case L2CAP_CONF_RFC
:
2462 if (olen
== sizeof(rfc
))
2463 memcpy(&rfc
, (void *)val
, olen
);
2465 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2466 rfc
.mode
!= pi
->mode
)
2467 return -ECONNREFUSED
;
2469 pi
->mode
= rfc
.mode
;
2472 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2473 sizeof(rfc
), (unsigned long) &rfc
);
2478 if (*result
== L2CAP_CONF_SUCCESS
) {
2480 case L2CAP_MODE_ERTM
:
2481 pi
->remote_tx_win
= rfc
.txwin_size
;
2482 pi
->retrans_timeout
= rfc
.retrans_timeout
;
2483 pi
->monitor_timeout
= rfc
.monitor_timeout
;
2484 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2486 case L2CAP_MODE_STREAMING
:
2487 pi
->max_pdu_size
= le16_to_cpu(rfc
.max_pdu_size
);
2492 req
->dcid
= cpu_to_le16(pi
->dcid
);
2493 req
->flags
= cpu_to_le16(0x0000);
2498 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2500 struct l2cap_conf_rsp
*rsp
= data
;
2501 void *ptr
= rsp
->data
;
2503 BT_DBG("sk %p", sk
);
2505 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2506 rsp
->result
= cpu_to_le16(result
);
2507 rsp
->flags
= cpu_to_le16(flags
);
2512 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2514 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2516 if (rej
->reason
!= 0x0000)
2519 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2520 cmd
->ident
== conn
->info_ident
) {
2521 del_timer(&conn
->info_timer
);
2523 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2524 conn
->info_ident
= 0;
2526 l2cap_conn_start(conn
);
2532 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2534 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2535 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2536 struct l2cap_conn_rsp rsp
;
2537 struct sock
*sk
, *parent
;
2538 int result
, status
= L2CAP_CS_NO_INFO
;
2540 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2541 __le16 psm
= req
->psm
;
2543 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2545 /* Check if we have socket listening on psm */
2546 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2548 result
= L2CAP_CR_BAD_PSM
;
2552 /* Check if the ACL is secure enough (if not SDP) */
2553 if (psm
!= cpu_to_le16(0x0001) &&
2554 !hci_conn_check_link_mode(conn
->hcon
)) {
2555 conn
->disc_reason
= 0x05;
2556 result
= L2CAP_CR_SEC_BLOCK
;
2560 result
= L2CAP_CR_NO_MEM
;
2562 /* Check for backlog size */
2563 if (sk_acceptq_is_full(parent
)) {
2564 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2568 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2572 write_lock_bh(&list
->lock
);
2574 /* Check if we already have channel with that dcid */
2575 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2576 write_unlock_bh(&list
->lock
);
2577 sock_set_flag(sk
, SOCK_ZAPPED
);
2578 l2cap_sock_kill(sk
);
2582 hci_conn_hold(conn
->hcon
);
2584 l2cap_sock_init(sk
, parent
);
2585 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2586 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2587 l2cap_pi(sk
)->psm
= psm
;
2588 l2cap_pi(sk
)->dcid
= scid
;
2590 __l2cap_chan_add(conn
, sk
, parent
);
2591 dcid
= l2cap_pi(sk
)->scid
;
2593 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2595 l2cap_pi(sk
)->ident
= cmd
->ident
;
2597 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2598 if (l2cap_check_security(sk
)) {
2599 if (bt_sk(sk
)->defer_setup
) {
2600 sk
->sk_state
= BT_CONNECT2
;
2601 result
= L2CAP_CR_PEND
;
2602 status
= L2CAP_CS_AUTHOR_PEND
;
2603 parent
->sk_data_ready(parent
, 0);
2605 sk
->sk_state
= BT_CONFIG
;
2606 result
= L2CAP_CR_SUCCESS
;
2607 status
= L2CAP_CS_NO_INFO
;
2610 sk
->sk_state
= BT_CONNECT2
;
2611 result
= L2CAP_CR_PEND
;
2612 status
= L2CAP_CS_AUTHEN_PEND
;
2615 sk
->sk_state
= BT_CONNECT2
;
2616 result
= L2CAP_CR_PEND
;
2617 status
= L2CAP_CS_NO_INFO
;
2620 write_unlock_bh(&list
->lock
);
2623 bh_unlock_sock(parent
);
2626 rsp
.scid
= cpu_to_le16(scid
);
2627 rsp
.dcid
= cpu_to_le16(dcid
);
2628 rsp
.result
= cpu_to_le16(result
);
2629 rsp
.status
= cpu_to_le16(status
);
2630 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2632 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2633 struct l2cap_info_req info
;
2634 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2636 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2637 conn
->info_ident
= l2cap_get_ident(conn
);
2639 mod_timer(&conn
->info_timer
, jiffies
+
2640 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2642 l2cap_send_cmd(conn
, conn
->info_ident
,
2643 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2649 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2651 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2652 u16 scid
, dcid
, result
, status
;
2656 scid
= __le16_to_cpu(rsp
->scid
);
2657 dcid
= __le16_to_cpu(rsp
->dcid
);
2658 result
= __le16_to_cpu(rsp
->result
);
2659 status
= __le16_to_cpu(rsp
->status
);
2661 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2664 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2668 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2674 case L2CAP_CR_SUCCESS
:
2675 sk
->sk_state
= BT_CONFIG
;
2676 l2cap_pi(sk
)->ident
= 0;
2677 l2cap_pi(sk
)->dcid
= dcid
;
2678 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2680 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2682 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2683 l2cap_build_conf_req(sk
, req
), req
);
2684 l2cap_pi(sk
)->num_conf_req
++;
2688 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2692 l2cap_chan_del(sk
, ECONNREFUSED
);
2700 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2702 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2708 dcid = __le16_to_cpu(req->dcid);
2709 flags = __le16_to_cpu(req->flags);
2711 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2713 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2717 if (sk->sk_state == BT_DISCONN)
2720 /* Reject if config buffer is too small. */
2721 len = cmd_len - sizeof(*req);
2722 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2723 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2724 l2cap_build_conf_rsp(sk, rsp,
2725 L2CAP_CONF_REJECT, flags), rsp);
2730 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2731 l2cap_pi(sk)->conf_len += len;
2733 if (flags & 0x0001) {
2734 /* Incomplete config. Send empty response. */
2735 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2736 l2cap_build_conf_rsp(sk, rsp,
2737 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2741 /* Complete config. */
2742 len = l2cap_parse_conf_req(sk, rsp);
2744 l2cap_send_disconn_req(conn, sk);
2748 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2749 l2cap_pi(sk)->num_conf_rsp++;
2751 /* Reset config buffer. */
2752 l2cap_pi(sk)->conf_len = 0;
2754 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2757 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2758 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2759 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2760 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2762 sk->sk_state = BT_CONNECTED;
2763 l2cap_pi(sk)->next_tx_seq = 0;
2764 l2cap_pi(sk)->expected_ack_seq = 0;
2765 l2cap_pi(sk)->unacked_frames = 0;
2767 setup_timer(&l2cap_pi(sk)->retrans_timer,
2768 l2cap_retrans_timeout, (unsigned long) sk);
2769 setup_timer(&l2cap_pi(sk)->monitor_timer,
2770 l2cap_monitor_timeout, (unsigned long) sk);
2772 __skb_queue_head_init(TX_QUEUE(sk));
2773 __skb_queue_head_init(SREJ_QUEUE(sk));
2774 l2cap_chan_ready(sk);
2778 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2780 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2781 l2cap_build_conf_req(sk, buf), buf);
2782 l2cap_pi(sk)->num_conf_req++;
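/* Handler for an incoming Configuration Response. An unacceptable-parameters
 * result triggers a renegotiated Configuration Request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); once both directions are configured the channel
 * goes to BT_CONNECTED and the retransmission-mode state is initialised. */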
2790 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2792 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2793 u16 scid, flags, result;
2796 scid = __le16_to_cpu(rsp->scid);
2797 flags = __le16_to_cpu(rsp->flags);
2798 result = __le16_to_cpu(rsp->result);
2800 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2801 scid, flags, result);
2803 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2808 case L2CAP_CONF_SUCCESS:
2811 case L2CAP_CONF_UNACCEPT:
2812 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2813 int len = cmd->len - sizeof(*rsp);
2816 /* throw out any old stored conf requests */
2817 result = L2CAP_CONF_SUCCESS;
2818 len = l2cap_parse_conf_rsp(sk, rsp->data,
2821 l2cap_send_disconn_req(conn, sk);
2825 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2826 L2CAP_CONF_REQ, len, req);
2827 l2cap_pi(sk)->num_conf_req++;
2828 if (result != L2CAP_CONF_SUCCESS)
2834 sk->sk_state = BT_DISCONN;
2835 sk->sk_err = ECONNRESET;
2836 l2cap_sock_set_timer(sk, HZ * 5);
2837 l2cap_send_disconn_req(conn, sk);
2844 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2846 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2847 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2848 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2849 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2851 sk->sk_state = BT_CONNECTED;
2852 l2cap_pi(sk)->expected_tx_seq = 0;
2853 l2cap_pi(sk)->buffer_seq = 0;
2854 l2cap_pi(sk)->num_to_ack = 0;
2855 __skb_queue_head_init(TX_QUEUE(sk));
2856 __skb_queue_head_init(SREJ_QUEUE(sk));
2857 l2cap_chan_ready(sk);
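/* Handler for an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, purge the transmit and SREJ queues, stop the
 * ERTM timers and remove the channel (ECONNRESET). */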
2865 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2867 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2868 struct l2cap_disconn_rsp rsp;
2872 scid = __le16_to_cpu(req->scid);
2873 dcid = __le16_to_cpu(req->dcid);
2875 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2877 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2881 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2882 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2883 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2885 sk->sk_shutdown = SHUTDOWN_MASK;
2887 skb_queue_purge(TX_QUEUE(sk));
2888 skb_queue_purge(SREJ_QUEUE(sk));
2889 del_timer(&l2cap_pi(sk)->retrans_timer);
2890 del_timer(&l2cap_pi(sk)->monitor_timer);
2892 l2cap_chan_del(sk, ECONNRESET);
2895 l2cap_sock_kill(sk);
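/* Handler for an incoming Disconnection Response to our own request:
 * the same queue and timer teardown, but the channel is removed without
 * reporting an error. */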
2899 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2901 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2905 scid = __le16_to_cpu(rsp->scid);
2906 dcid = __le16_to_cpu(rsp->dcid);
2908 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2910 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2914 skb_queue_purge(TX_QUEUE(sk));
2915 skb_queue_purge(SREJ_QUEUE(sk));
2916 del_timer(&l2cap_pi(sk)->retrans_timer);
2917 del_timer(&l2cap_pi(sk)->monitor_timer);
2919 l2cap_chan_del(sk, 0);
2922 l2cap_sock_kill(sk);
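/* Handler for an incoming Information Request. Supported types are the
 * extended feature mask (with the ERTM and streaming bits when enabled)
 * and the fixed channel map; any other type is answered with
 * L2CAP_IR_NOTSUPP. */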
2926 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2928 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2931 type = __le16_to_cpu(req->type);
2933 BT_DBG("type 0x%4.4x", type);
2935 if (type == L2CAP_IT_FEAT_MASK) {
2937 u32 feat_mask = l2cap_feat_mask;
2938 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2939 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2940 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2942 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2944 put_unaligned_le32(feat_mask, rsp->data);
2945 l2cap_send_cmd(conn, cmd->ident,
2946 L2CAP_INFO_RSP, sizeof(buf), buf);
2947 } else if (type == L2CAP_IT_FIXED_CHAN) {
2949 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2950 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2951 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2952 memcpy(buf + 4, l2cap_fixed_chan, 8);
2953 l2cap_send_cmd(conn, cmd->ident,
2954 L2CAP_INFO_RSP, sizeof(buf), buf);
2956 struct l2cap_info_rsp rsp;
2957 rsp.type = cpu_to_le16(type);
2958 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2959 l2cap_send_cmd(conn, cmd->ident,
2960 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
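/* Handler for an incoming Information Response: caches the remote feature
 * mask, optionally follows up with a fixed-channel query, and then lets
 * pending channels proceed via l2cap_conn_start(). */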
2966 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2968 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2971 type = __le16_to_cpu(rsp->type);
2972 result = __le16_to_cpu(rsp->result);
2974 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2976 del_timer(&conn->info_timer);
2978 if (type == L2CAP_IT_FEAT_MASK) {
2979 conn->feat_mask = get_unaligned_le32(rsp->data);
2981 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2982 struct l2cap_info_req req;
2983 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2985 conn->info_ident = l2cap_get_ident(conn);
2987 l2cap_send_cmd(conn, conn->info_ident,
2988 L2CAP_INFO_REQ, sizeof(req), &req);
2990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2991 conn->info_ident = 0;
2993 l2cap_conn_start(conn);
2995 } else if (type == L2CAP_IT_FIXED_CHAN) {
2996 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2997 conn->info_ident = 0;
2999 l2cap_conn_start(conn);
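/* Dispatcher for the signalling channel (CID 0x0001). Every command starts
 * with a 4-byte header (code, ident, len); commands are processed in a loop
 * and any handler failure is reported back with a Command Reject. */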
3005 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3007 u8 *data = skb->data;
3009 struct l2cap_cmd_hdr cmd;
3012 l2cap_raw_recv(conn, skb);
3014 while (len >= L2CAP_CMD_HDR_SIZE) {
3016 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3017 data += L2CAP_CMD_HDR_SIZE;
3018 len -= L2CAP_CMD_HDR_SIZE;
3020 cmd_len = le16_to_cpu(cmd.len);
3022 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3024 if (cmd_len > len || !cmd.ident) {
3025 BT_DBG("corrupted command");
3030 case L2CAP_COMMAND_REJ:
3031 l2cap_command_rej(conn, &cmd, data);
3034 case L2CAP_CONN_REQ:
3035 err = l2cap_connect_req(conn, &cmd, data);
3038 case L2CAP_CONN_RSP:
3039 err = l2cap_connect_rsp(conn, &cmd, data);
3042 case L2CAP_CONF_REQ:
3043 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3046 case L2CAP_CONF_RSP:
3047 err = l2cap_config_rsp(conn, &cmd, data);
3050 case L2CAP_DISCONN_REQ:
3051 err = l2cap_disconnect_req(conn, &cmd, data);
3054 case L2CAP_DISCONN_RSP:
3055 err = l2cap_disconnect_rsp(conn, &cmd, data);
3058 case L2CAP_ECHO_REQ:
3059 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3062 case L2CAP_ECHO_RSP:
3065 case L2CAP_INFO_REQ:
3066 err = l2cap_information_req(conn, &cmd, data);
3069 case L2CAP_INFO_RSP:
3070 err = l2cap_information_rsp(conn, &cmd, data);
3074 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3080 struct l2cap_cmd_rej rej;
3081 BT_DBG("error %d", err);
3083 /* FIXME: Map err to a valid reason */
3084 rej.reason = cpu_to_le16(0);
3085 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
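/* Verify the per-PDU FCS used by ERTM and streaming mode: the received
 * CRC-16 is trimmed from the tail of the skb and compared against a CRC
 * computed over the basic L2CAP header plus the remaining payload. */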
3095 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3097 u16 our_fcs, rcv_fcs;
3098 int hdr_size = L2CAP_HDR_SIZE + 2;
3100 if (pi->fcs == L2CAP_FCS_CRC16) {
3101 skb_trim(skb, skb->len - 2);
3102 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3103 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3105 if (our_fcs != rcv_fcs)
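/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq so reassembly can resume in order once the missing
 * frames have been retransmitted. */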
3111 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3113 struct sk_buff *next_skb;
3115 bt_cb(skb)->tx_seq = tx_seq;
3116 bt_cb(skb)->sar = sar;
3118 next_skb = skb_peek(SREJ_QUEUE(sk));
3120 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3125 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3126 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3130 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3133 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3135 __skb_queue_tail(SREJ_QUEUE(sk), skb);
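/* Reassemble a segmented SDU from the SAR bits of the control field:
 * an unsegmented frame is queued directly, a start frame allocates the
 * SDU buffer from the announced SDU length, and continue/end frames append
 * to it until partial_sdu_len reaches sdu_len. */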
3138 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3140 struct l2cap_pinfo *pi = l2cap_pi(sk);
3141 struct sk_buff *_skb;
3144 switch (control & L2CAP_CTRL_SAR) {
3145 case L2CAP_SDU_UNSEGMENTED:
3146 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3151 err = sock_queue_rcv_skb(sk, skb);
3157 case L2CAP_SDU_START:
3158 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3163 pi->sdu_len = get_unaligned_le16(skb->data);
3166 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3172 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3174 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3175 pi->partial_sdu_len = skb->len;
3179 case L2CAP_SDU_CONTINUE:
3180 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3183 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3185 pi->partial_sdu_len += skb->len;
3186 if (pi->partial_sdu_len > pi->sdu_len)
3194 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3197 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3199 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3200 pi->partial_sdu_len += skb->len;
3202 if (pi->partial_sdu_len == pi->sdu_len) {
3203 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3204 err = sock_queue_rcv_skb(sk, _skb);
3218 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3220 struct sk_buff *skb;
3223 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3224 if (bt_cb(skb)->tx_seq != tx_seq)
3227 skb = skb_dequeue(SREJ_QUEUE(sk));
3228 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3229 l2cap_sar_reassembly_sdu(sk, skb, control);
3230 l2cap_pi(sk)->buffer_seq_srej =
3231 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3236 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3238 struct l2cap_pinfo *pi = l2cap_pi(sk);
3239 struct srej_list *l, *tmp;
3242 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3243 if (l->tx_seq == tx_seq) {
3248 control = L2CAP_SUPER_SELECT_REJECT;
3249 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3250 l2cap_send_sframe(pi, control);
3252 list_add_tail(&l->list, SREJ_LIST(sk));
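/* Send SREJ S-frames for every sequence number between the expected one and
 * the tx_seq that actually arrived, remembering each requested frame in
 * SREJ_LIST so it can be matched (or re-requested) later. */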
3256 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3258 struct l2cap_pinfo *pi = l2cap_pi(sk);
3259 struct srej_list *new;
3262 while (tx_seq != pi->expected_tx_seq) {
3263 control = L2CAP_SUPER_SELECT_REJECT;
3264 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3265 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3266 control |= L2CAP_CTRL_POLL;
3267 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3269 l2cap_send_sframe(pi, control);
3271 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3272 new->tx_seq = pi->expected_tx_seq++;
3273 list_add_tail(&new->list, SREJ_LIST(sk));
3275 pi->expected_tx_seq++;
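/* ERTM receive path for I-frames: in-sequence frames are acknowledged and
 * passed to SDU reassembly, while a sequence gap switches the channel into
 * SREJ recovery, buffering out-of-order frames until the gap is filled. */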
3278 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3280 struct l2cap_pinfo *pi = l2cap_pi(sk);
3281 u8 tx_seq = __get_txseq(rx_control);
3283 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3286 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3288 if (tx_seq == pi->expected_tx_seq)
3291 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3292 struct srej_list *first;
3294 first = list_first_entry(SREJ_LIST(sk),
3295 struct srej_list, list);
3296 if (tx_seq == first->tx_seq) {
3297 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3298 l2cap_check_srej_gap(sk, tx_seq);
3300 list_del(&first->list);
3303 if (list_empty(SREJ_LIST(sk))) {
3304 pi->buffer_seq = pi->buffer_seq_srej;
3305 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3308 struct srej_list *l;
3309 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3311 list_for_each_entry(l, SREJ_LIST(sk), list) {
3312 if (l->tx_seq == tx_seq) {
3313 l2cap_resend_srejframe(sk, tx_seq);
3317 l2cap_send_srejframe(sk, tx_seq);
3320 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3322 INIT_LIST_HEAD(SREJ_LIST(sk));
3323 pi->buffer_seq_srej = pi->buffer_seq;
3325 __skb_queue_head_init(SREJ_QUEUE(sk));
3326 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3328 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3330 l2cap_send_srejframe(sk, tx_seq);
3335 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3337 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3338 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3342 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3344 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3348 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3349 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3350 tx_control |= L2CAP_SUPER_RCV_READY;
3351 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3352 l2cap_send_sframe(pi, tx_control);
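/* ERTM receive path for S-frames (RR, REJ, SREJ, RNR): updates the
 * acknowledgement state, drives retransmissions and tracks the remote
 * busy condition signalled by the peer. */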
3357 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3359 struct l2cap_pinfo *pi = l2cap_pi(sk);
3360 u8 tx_seq = __get_reqseq(rx_control);
3362 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3364 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3365 case L2CAP_SUPER_RCV_READY:
3366 if (rx_control & L2CAP_CTRL_POLL) {
3367 u16 control = L2CAP_CTRL_FINAL;
3368 control |= L2CAP_SUPER_RCV_READY |
3369 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3370 l2cap_send_sframe(l2cap_pi(sk), control);
3371 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3373 } else if (rx_control & L2CAP_CTRL_FINAL) {
3374 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3375 pi->expected_ack_seq = tx_seq;
3376 l2cap_drop_acked_frames(sk);
3378 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3381 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3382 del_timer(&pi->monitor_timer);
3384 if (pi->unacked_frames > 0)
3385 __mod_retrans_timer();
3387 pi->expected_ack_seq = tx_seq;
3388 l2cap_drop_acked_frames(sk);
3390 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3391 && (pi->unacked_frames > 0))
3392 __mod_retrans_timer();
3394 l2cap_ertm_send(sk);
3395 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3399 case L2CAP_SUPER_REJECT:
3400 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3402 pi->expected_ack_seq = __get_reqseq(rx_control);
3403 l2cap_drop_acked_frames(sk);
3405 sk->sk_send_head = TX_QUEUE(sk)->next;
3406 pi->next_tx_seq = pi->expected_ack_seq;
3408 l2cap_ertm_send(sk);
3412 case L2CAP_SUPER_SELECT_REJECT:
3413 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3415 if (rx_control & L2CAP_CTRL_POLL) {
3416 l2cap_retransmit_frame(sk, tx_seq);
3417 pi->expected_ack_seq = tx_seq;
3418 l2cap_drop_acked_frames(sk);
3419 l2cap_ertm_send(sk);
3420 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3421 pi->srej_save_reqseq = tx_seq;
3422 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3424 } else if (rx_control & L2CAP_CTRL_FINAL) {
3425 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3426 pi->srej_save_reqseq == tx_seq)
/* The saved SREJ has been answered; clear the SREJ_ACT state flag. */
3427 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3429 l2cap_retransmit_frame(sk, tx_seq);
3432 l2cap_retransmit_frame(sk, tx_seq);
3433 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3434 pi->srej_save_reqseq = tx_seq;
3435 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3440 case L2CAP_SUPER_RCV_NOT_READY:
3441 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3442 pi->expected_ack_seq = tx_seq;
3443 l2cap_drop_acked_frames(sk);
3445 del_timer(&l2cap_pi(sk)->retrans_timer);
3446 if (rx_control & L2CAP_CTRL_POLL) {
3447 u16 control = L2CAP_CTRL_FINAL;
3448 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
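/* Entry point for data on a connection-oriented channel: looks up the
 * socket by CID and handles the payload according to the channel mode
 * (basic, ERTM or streaming), including control-field parsing and FCS
 * checking for the retransmission modes. */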
3456 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3459 struct l2cap_pinfo *pi;
3464 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3466 BT_DBG("unknown cid 0x%4.4x", cid);
3472 BT_DBG("sk %p, len %d", sk, skb->len);
3474 if (sk->sk_state != BT_CONNECTED)
3478 case L2CAP_MODE_BASIC:
3479 /* If the socket receive buffer overflows we drop data here,
3480 * which is *bad* because L2CAP has to be reliable.
3481 * But we don't have any other choice: basic mode L2CAP
3482 * provides no flow control mechanism. */
3484 if (pi->imtu < skb->len)
3487 if (!sock_queue_rcv_skb(sk, skb))
3491 case L2CAP_MODE_ERTM:
3492 control = get_unaligned_le16(skb->data);
3496 if (__is_sar_start(control))
3499 if (pi->fcs == L2CAP_FCS_CRC16)
3503 * We can just drop the corrupted I-frame here.
3504 * Receiver will miss it and start proper recovery
3505 * procedures and ask retransmission.
3507 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3510 if (l2cap_check_fcs(pi, skb))
3513 if (__is_iframe(control))
3514 err = l2cap_data_channel_iframe(sk, control, skb);
3516 err = l2cap_data_channel_sframe(sk, control, skb);
3522 case L2CAP_MODE_STREAMING:
3523 control = get_unaligned_le16(skb->data);
3527 if (__is_sar_start(control))
3530 if (pi->fcs == L2CAP_FCS_CRC16)
3533 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3536 if (l2cap_check_fcs(pi, skb))
3539 tx_seq = __get_txseq(control);
3541 if (pi->expected_tx_seq == tx_seq)
3542 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3544 pi->expected_tx_seq = tx_seq + 1;
3546 err = l2cap_sar_reassembly_sdu(sk, skb, control);
3551 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
3565 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3569 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3573 BT_DBG("sk %p, len %d", sk, skb->len);
3575 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3578 if (l2cap_pi(sk)->imtu < skb->len)
3581 if (!sock_queue_rcv_skb(sk, skb))
3593 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3595 struct l2cap_hdr *lh = (void *) skb->data;
3599 skb_pull(skb, L2CAP_HDR_SIZE);
3600 cid = __le16_to_cpu(lh->cid);
3601 len = __le16_to_cpu(lh->len);
3603 if (len != skb->len) {
3608 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3611 case L2CAP_CID_SIGNALING:
3612 l2cap_sig_channel(conn, skb);
3615 case L2CAP_CID_CONN_LESS:
3616 psm = get_unaligned_le16(skb->data);
3618 l2cap_conless_channel(conn, psm, skb);
3622 l2cap_data_channel(conn, cid, skb);
3627 /* ---- L2CAP interface with lower layer (HCI) ---- */
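/* Called by the HCI core on an incoming ACL connection request; the
 * listening L2CAP sockets decide whether the link should be accepted and
 * whether a role switch to master is desired. */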
3629 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3631 int exact = 0, lm1 = 0, lm2 = 0;
3632 register struct sock *sk;
3633 struct hlist_node *node;
3635 if (type != ACL_LINK)
3638 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3640 /* Find listening sockets and check their link_mode */
3641 read_lock(&l2cap_sk_list.lock);
3642 sk_for_each(sk, node, &l2cap_sk_list.head) {
3643 if (sk->sk_state != BT_LISTEN)
3646 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3647 lm1 |= HCI_LM_ACCEPT;
3648 if (l2cap_pi(sk)->role_switch)
3649 lm1 |= HCI_LM_MASTER;
3651 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3652 lm2 |= HCI_LM_ACCEPT;
3653 if (l2cap_pi(sk)->role_switch)
3654 lm2 |= HCI_LM_MASTER;
3657 read_unlock(&l2cap_sk_list.lock);
3659 return exact ? lm1 : lm2;
3662 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3664 struct l2cap_conn *conn;
3666 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3668 if (hcon->type != ACL_LINK)
3672 conn = l2cap_conn_add(hcon, status);
3674 l2cap_conn_ready(conn);
3676 l2cap_conn_del(hcon, bt_err(status));
3681 static int l2cap_disconn_ind(struct hci_conn *hcon)
3683 struct l2cap_conn *conn = hcon->l2cap_data;
3685 BT_DBG("hcon %p", hcon);
3687 if (hcon->type != ACL_LINK || !conn)
3690 return conn->disc_reason;
3693 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3695 BT_DBG("hcon %p reason %d", hcon, reason);
3697 if (hcon->type != ACL_LINK)
3700 l2cap_conn_del(hcon, bt_err(reason));
3705 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3707 if (sk->sk_type != SOCK_SEQPACKET)
3710 if (encrypt == 0x00) {
3711 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3712 l2cap_sock_clear_timer(sk);
3713 l2cap_sock_set_timer(sk, HZ * 5);
3714 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3715 __l2cap_sock_close(sk, ECONNREFUSED);
3717 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3718 l2cap_sock_clear_timer(sk);
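/* Called by the HCI core when authentication or encryption changes on a
 * link. Channels waiting on security either continue connection setup
 * (sending the deferred Connection Request or Response) or are torn down
 * on failure. */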
3722 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3724 struct l2cap_chan_list *l;
3725 struct l2cap_conn *conn = hcon->l2cap_data;
3731 l = &conn->chan_list;
3733 BT_DBG("conn %p", conn);
3735 read_lock(&l->lock);
3737 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3740 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3745 if (!status && (sk->sk_state == BT_CONNECTED ||
3746 sk->sk_state == BT_CONFIG)) {
3747 l2cap_check_encryption(sk, encrypt);
3752 if (sk->sk_state == BT_CONNECT) {
3754 struct l2cap_conn_req req;
3755 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3756 req.psm = l2cap_pi(sk)->psm;
3758 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3760 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3761 L2CAP_CONN_REQ, sizeof(req), &req);
3763 l2cap_sock_clear_timer(sk);
3764 l2cap_sock_set_timer(sk, HZ / 10);
3766 } else if (sk->sk_state == BT_CONNECT2) {
3767 struct l2cap_conn_rsp rsp;
3771 sk->sk_state = BT_CONFIG;
3772 result = L2CAP_CR_SUCCESS;
3774 sk->sk_state = BT_DISCONN;
3775 l2cap_sock_set_timer(sk, HZ / 10);
3776 result = L2CAP_CR_SEC_BLOCK;
3779 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3780 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3781 rsp.result = cpu_to_le16(result);
3782 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3783 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3784 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3790 read_unlock(&l->lock);
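/* Receive path from HCI: ACL packets may carry a complete L2CAP frame or
 * only a fragment, so start packets allocate conn->rx_skb from the length
 * in the L2CAP header and continuation packets are appended until the
 * frame is complete and handed to l2cap_recv_frame(). */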
3795 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3797 struct l2cap_conn *conn = hcon->l2cap_data;
3799 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3802 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3804 if (flags & ACL_START) {
3805 struct l2cap_hdr *hdr;
3809 BT_ERR("Unexpected start frame (len %d)", skb->len);
3810 kfree_skb(conn->rx_skb);
3811 conn->rx_skb = NULL;
3813 l2cap_conn_unreliable(conn, ECOMM);
3817 BT_ERR("Frame is too short (len %d)", skb->len);
3818 l2cap_conn_unreliable(conn, ECOMM);
3822 hdr = (struct l2cap_hdr *) skb->data;
3823 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3825 if (len == skb->len) {
3826 /* Complete frame received */
3827 l2cap_recv_frame(conn, skb);
3831 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3833 if (skb->len > len) {
3834 BT_ERR("Frame is too long (len %d, expected len %d)",
3836 l2cap_conn_unreliable(conn, ECOMM);
3840 /* Allocate skb for the complete frame (with header) */
3841 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3845 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3847 conn->rx_len = len - skb->len;
3849 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3851 if (!conn->rx_len) {
3852 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3853 l2cap_conn_unreliable(conn, ECOMM);
3857 if (skb->len > conn->rx_len) {
3858 BT_ERR("Fragment is too long (len %d, expected %d)",
3859 skb->len, conn->rx_len);
3860 kfree_skb(conn->rx_skb);
3861 conn->rx_skb = NULL;
3863 l2cap_conn_unreliable(conn, ECOMM);
3867 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3869 conn->rx_len -= skb->len;
3871 if (!conn->rx_len) {
3872 /* Complete frame received */
3873 l2cap_recv_frame(conn, conn->rx_skb);
3874 conn->rx_skb = NULL;
3883 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3886 struct hlist_node *node;
3889 read_lock_bh(&l2cap_sk_list.lock);
3891 sk_for_each(sk, node, &l2cap_sk_list.head) {
3892 struct l2cap_pinfo *pi = l2cap_pi(sk);
3894 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3895 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3896 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3897 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3900 read_unlock_bh(&l2cap_sk_list.lock);
3905 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
3907 static const struct proto_ops l2cap_sock_ops = {
3908 .family = PF_BLUETOOTH,
3909 .owner = THIS_MODULE,
3910 .release = l2cap_sock_release,
3911 .bind = l2cap_sock_bind,
3912 .connect = l2cap_sock_connect,
3913 .listen = l2cap_sock_listen,
3914 .accept = l2cap_sock_accept,
3915 .getname = l2cap_sock_getname,
3916 .sendmsg = l2cap_sock_sendmsg,
3917 .recvmsg = l2cap_sock_recvmsg,
3918 .poll = bt_sock_poll,
3919 .ioctl = bt_sock_ioctl,
3920 .mmap = sock_no_mmap,
3921 .socketpair = sock_no_socketpair,
3922 .shutdown = l2cap_sock_shutdown,
3923 .setsockopt = l2cap_sock_setsockopt,
3924 .getsockopt = l2cap_sock_getsockopt
3927 static struct net_proto_family l2cap_sock_family_ops = {
3928 .family = PF_BLUETOOTH,
3929 .owner = THIS_MODULE,
3930 .create = l2cap_sock_create,
3933 static struct hci_proto l2cap_hci_proto = {
3935 .id = HCI_PROTO_L2CAP,
3936 .connect_ind = l2cap_connect_ind,
3937 .connect_cfm = l2cap_connect_cfm,
3938 .disconn_ind = l2cap_disconn_ind,
3939 .disconn_cfm = l2cap_disconn_cfm,
3940 .security_cfm = l2cap_security_cfm,
3941 .recv_acldata = l2cap_recv_acldata
3944 static int __init l2cap_init(void)
3948 err = proto_register(&l2cap_proto, 0);
3952 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3954 BT_ERR("L2CAP socket registration failed");
3958 err = hci_register_proto(&l2cap_hci_proto);
3960 BT_ERR("L2CAP protocol registration failed");
3961 bt_sock_unregister(BTPROTO_L2CAP);
3965 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3966 BT_ERR("Failed to create L2CAP info file");
3968 BT_INFO("L2CAP ver %s", VERSION);
3969 BT_INFO("L2CAP socket layer initialized");
3974 proto_unregister(&l2cap_proto);
3978 static void __exit l2cap_exit(void)
3980 class_remove_file(bt_class, &class_attr_l2cap);
3982 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3983 BT_ERR("L2CAP socket unregistration failed");
3985 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3986 BT_ERR("L2CAP protocol unregistration failed");
3988 proto_unregister(&l2cap_proto);
3993 /* Dummy function to trigger automatic L2CAP module loading by
3994 * other modules that use L2CAP sockets but don't use any other
3995 * symbols from it. */
3998 EXPORT_SYMBOL(l2cap_load
);
4000 module_init(l2cap_init
);
4001 module_exit(l2cap_exit
);
4003 module_param(enable_ertm
, bool, 0644);
4004 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4006 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4007 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4008 MODULE_VERSION(VERSION
);
4009 MODULE_LICENSE("GPL");
4010 MODULE_ALIAS("bt-proto-0");