/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
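/* Worked example (informational): for nr = 19 this reads the __u32 word
 * at index 19 >> 5 = 0 and tests bit 19 & 31 = 19 within it. Unlike
 * test_bit(), this helper neither provides atomicity nor requires a
 * long-aligned bitmap, which is why it is used on the packed __u32 masks
 * of the per-socket filter and of hci_sec_filter below.
 */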
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
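/* How the table above is consulted (see hci_sock_sendmsg() below): for a
 * command opcode, ogf = hci_opcode_ogf(opcode) selects a row, and bit
 * (ocf & HCI_FLT_OCF_BITS) within that row decides whether a sender
 * without CAP_NET_RAW may issue the command. For instance, a hypothetical
 * command with ogf 3 and ocf 9 tests bit 9 of ocf_mask[3][0], i.e.
 * 0xaab00200 & (1 << 9), which is set and therefore allowed.
 */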
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
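/* Userspace counterpart, as a rough sketch (not part of this file): a raw
 * socket narrows what it receives with setsockopt(HCI_FILTER), which
 * hci_sock_setsockopt() below copies into the per-socket filter checked
 * here. Assuming a bound raw socket fd:
 *
 *	struct hci_ufilter uf = { 0 };
 *
 *	uf.type_mask = 1 << HCI_EVENT_PKT;		// events only
 *	uf.event_mask[0] = 1 << HCI_EV_CMD_COMPLETE;	// one event
 *	if (setsockopt(fd, SOL_HCI, HCI_FILTER, &uf, sizeof(uf)) < 0)
 *		return -1;
 *
 * For sockets without CAP_NET_RAW the requested masks are additionally
 * AND-ed with hci_sec_filter (see hci_sock_setsockopt() below).
 */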
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
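/* Note on the copy strategy above: the single skb_copy carries the extra
 * packet-type byte that raw sockets expect as the first octet of every
 * frame, and it is created at most once per delivery. Each matching
 * socket then only receives a cheap clone of that copy instead of a full
 * copy of the payload.
 */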
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
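/* The net effect (informational): every frame forwarded to
 * HCI_CHANNEL_MONITOR sockets is prefixed with a struct hci_mon_hdr
 * carrying a little-endian opcode (direction-specific for ACL and SCO
 * data), the controller index and the payload length. This is the frame
 * format that tools like btmon decode.
 */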
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
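/* Example of use (see hci_sock_dev_event() below): a struct
 * hci_ev_si_device { event, dev_id } is wrapped into such a synthetic
 * HCI_EV_STACK_INTERNAL event, so raw sockets can observe device up/down
 * transitions through the normal event path, subject to their filter.
 */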
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
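/* As far as I can tell, the management interface in mgmt.c is the in-tree
 * user of this registry, registering HCI_CHANNEL_CONTROL together with its
 * handler table; hci_sock_bind() and hci_sock_sendmsg() below route any
 * channel number they do not handle themselves through these lookups.
 */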
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
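/* Userspace sketch of the monitor case above (illustrative, not part of
 * this file), assuming the usual AF_BLUETOOTH definitions; this is roughly
 * how a monitor client such as btmon attaches:
 *
 *	struct sockaddr_hci addr = { 0 };
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 *	addr.hci_family  = AF_BLUETOOTH;
 *	addr.hci_dev     = HCI_DEV_NONE;
 *	addr.hci_channel = HCI_CHANNEL_MONITOR;
 *	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		return -1;	// requires CAP_NET_RAW, see above
 *
 * After a successful bind the socket immediately receives the two version
 * notes and a replay of the existing controllers and control sockets.
 */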
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel= hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
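/* Layout of a valid logging frame as enforced above, illustrated for a
 * hypothetical ident "example" and message "hi":
 *
 *	struct hci_mon_hdr	opcode 0x0000, index, len
 *	priority		1 byte, 0-7
 *	ident_len		1 byte, here 8 (the ident including its NUL)
 *	ident			"example\0"
 *	message			"hi\0"
 *
 * The opcode is rewritten to HCI_MON_USER_LOGGING before the frame is
 * forwarded to the monitor channel.
 */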
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
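/* Userspace sketch of the raw-socket command path above (illustrative,
 * not part of this file): the first byte selects the packet type and the
 * little-endian opcode plus parameter length follow. A command with
 * opcode 0x0c03 (ogf 0x03, ocf 0x003) and no parameters would be sent as:
 *
 *	uint8_t pkt[4] = { HCI_COMMAND_PKT, 0x03, 0x0c, 0x00 };
 *
 *	if (send(fd, pkt, sizeof(pkt), 0) < 0)
 *		return -1;
 *
 * For senders without CAP_NET_RAW, row ocf_mask[3] of hci_sec_filter
 * above then decides whether the command is permitted.
 */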
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}