/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};
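
/* Usage note (editorial, not part of the original file): a minimal
 * userspace sketch of creating an HCI socket and binding it to the raw
 * channel of hci0, assuming the userspace <bluetooth/bluetooth.h> and
 * <bluetooth/hci.h> headers that mirror the definitions used here:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci a = { .hci_family  = AF_BLUETOOTH,
 *				  .hci_dev     = 0,
 *				  .hci_channel = HCI_CHANNEL_RAW };
 *
 *	if (fd < 0 || bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *		return -1;
 *
 * The bound channel and controller end up in the hci_pinfo fields above.
 */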
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
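
/* Worked example (editorial, not part of the original file): for the
 * Inquiry command, opcode 0x0401, hci_opcode_ogf() yields OGF 0x01
 * (OGF_LINK_CTL) and hci_opcode_ocf() yields OCF 0x0001. The check
 * hci_test_bit(1, &hci_sec_filter.ocf_mask[1]) then tests bit 1 of
 * word 1 >> 5 = 0, i.e. of 0xbe000006, which is set, so an
 * unprivileged raw socket may send Inquiry without CAP_NET_RAW.
 */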
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
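
/* Usage note (editorial, not part of the original file): userspace
 * installs the filter consulted above via setsockopt(). A minimal
 * sketch, assuming the BlueZ library helpers hci_filter_clear(),
 * hci_filter_set_ptype() and hci_filter_set_event() from
 * <bluetooth/hci.h>:
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */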
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
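
/* Wire format note (editorial, not part of the original file): every
 * monitor packet is prefixed with the 6-byte struct hci_mon_hdr built
 * above, all fields little endian:
 *
 *	__le16 opcode | __le16 index | __le16 len | payload (len bytes)
 *
 * so a monitor reader can parse the stream by reading six header bytes
 * and then len payload bytes per packet.
 */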
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
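
/* Size note (editorial, not part of the original file): the
 * 14 + TASK_COMM_LEN allocation above covers the payload exactly:
 * cookie (4) + format (2) + version (3) + flags (4) + comm length
 * byte (1) = 14 bytes, followed by up to TASK_COMM_LEN bytes of the
 * task name.
 */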
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
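
/* Usage note (editorial, not part of the original file): a minimal
 * sketch of driving these ioctls from userspace, assuming the BlueZ
 * <bluetooth/hci.h> header:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(fd, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP hci0");
 *
 * Note that an ioctl on an otherwise unbound socket is exactly what
 * triggers the cookie generation and monitor open notification above.
 */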
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;
	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EOPNOTSUPP;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;
	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;
	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;
	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
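
/* Usage note (editorial, not part of the original file): binding to the
 * monitor channel, as btmon does. A minimal userspace sketch
 * (CAP_NET_RAW is required and hci_dev must be HCI_DEV_NONE, per the
 * checks above):
 *
 *	struct sockaddr_hci a = { .hci_family  = AF_BLUETOOTH,
 *				  .hci_dev     = HCI_DEV_NONE,
 *				  .hci_channel = HCI_CHANNEL_MONITOR };
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * On success the kernel immediately queues the system notes plus the
 * replay of existing indexes and control sockets seen above.
 */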
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
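
/* Wire format note (editorial, not part of the original file): the
 * smallest valid logging frame is sizeof(struct hci_mon_hdr) + 3 = 9
 * bytes, for example opcode 0x0000, index MGMT_INDEX_NONE, len 3, then
 * priority 6, ident_len 0 and a single NUL for the empty message. With
 * ident_len zero the length byte itself doubles as the ident
 * terminator, which is why the code above bounds ident_len by
 * len - sizeof(*hdr) - 3.
 */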
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
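
/* Usage note (editorial, not part of the original file): a raw-channel
 * write is one packet-type byte followed by the HCI packet. A minimal
 * sketch sending HCI_Reset (OGF 0x03, OCF 0x0003, opcode 0x0c03 little
 * endian, no parameters) on a bound raw socket fd:
 *
 *	unsigned char cmd[4] = { HCI_COMMAND_PKT, 0x03, 0x0c, 0x00 };
 *
 *	write(fd, cmd, sizeof(cmd));
 *
 * The leading byte becomes hci_skb_pkt_type() above and is pulled off
 * before the opcode is checked; Reset is not in the hci_sec_filter
 * white-list, so this particular command needs CAP_NET_RAW.
 */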
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}