/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
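
/* Note on hci_test_bit(): 'nr' selects a 32-bit word (nr >> 5) and a
 * bit inside it (nr & 31); e.g. nr = 37 tests bit 5 of the second
 * word. It exists because these filter masks are laid out as __u32
 * words, whereas the generic test_bit() indexes unsigned long words
 * and would split the bits differently on 64-bit systems.
 */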
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
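
/* Reading the table above: hci_test_bit() indexes these words by event
 * code or OCF. For example, Inquiry Complete (event 0x01) is bit 1 of
 * the first event word, which is set in 0x1000d9fe, so unprivileged
 * RAW sockets may receive it; the packet type mask 0x10 is bit
 * HCI_EVENT_PKT (0x04), i.e. only event packets pass by default.
 */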
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
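
/* The opcode offsets above follow the HCI event layout: skb->data
 * starts at the event header, so for Command Complete the opcode sits
 * at data + 3 (2-byte header plus Num_HCI_Command_Packets), and for
 * Command Status at data + 4 (an extra status byte comes first).
 */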
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
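
/* Delivery above is copy-once, clone-many: the single private skb_copy
 * (with the packet type byte pushed in front of the data) is created
 * on the first match and then skb_clone()d per receiving socket, so
 * the payload itself is never duplicated per socket.
 */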
/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
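
/* On the wire a monitor frame is therefore struct hci_mon_hdr followed
 * by the original packet, i.e. three little-endian 16-bit fields:
 *
 *	opcode (HCI_MON_*), index (controller id), len (payload length)
 *
 * which is the framing consumed by btmon and similar readers.
 */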
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}
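
/* Without this replay, a socket binding to HCI_CHANNEL_MONITOR after
 * controllers were registered would never see their HCI_MON_NEW_INDEX
 * events; synthesizing one per known controller gives the reader a
 * complete index view from the start.
 */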
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
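
/* Stack-internal events thus travel in-band as ordinary HCI event
 * packets (evt = HCI_EV_STACK_INTERNAL) marked as incoming, which lets
 * RAW sockets apply the usual event-mask filtering to them.
 */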
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
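
/* Illustrative userspace counterpart of these ioctls (a sketch, not
 * part of this file; error handling omitted):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(fd, HCIDEVUP, 0);		bring up hci0 (CAP_NET_ADMIN)
 *	ioctl(fd, HCIDEVDOWN, 0);	take it back down
 */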
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EOPNOTSUPP;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
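
/* Illustrative userspace counterpart of hci_sock_bind() (a sketch,
 * not part of this file; error handling omitted):
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 */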
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);

#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
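
/* Management commands parsed above are framed as struct mgmt_hdr
 * followed by the handler-specific parameters, all little endian:
 *
 *	__le16 opcode;
 *	__le16 index;	controller id, or MGMT_INDEX_NONE
 *	__le16 len;	parameter length
 *	u8     param[];
 */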
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
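
/* Frames written to RAW/USER sockets carry the HCI packet type as
 * their first byte. For example, an HCI Reset is the four bytes
 * { HCI_COMMAND_PKT, 0x03, 0x0c, 0x00 }: opcode 0x0c03 little endian
 * plus a zero parameter length.
 */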
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
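
/* Illustrative userspace use of HCI_FILTER (a sketch, not part of
 * this file; assumes the hci_filter_* helpers from BlueZ's hci_lib.h):
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */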
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}