/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
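/* Worked example (illustrative, not from the original source): for HCI
 * event 0x2f (Extended Inquiry Result), nr >> 5 selects 32-bit word 1 of
 * the mask and nr & 31 selects bit 15 within it, so the test reads word 1
 * and checks it against 0x00008000.
 */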
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
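/* Worked example (illustrative, not from the original source):
 * HCI_Read_Local_Version_Information has opcode 0x1001, so
 * ogf = 0x1001 >> 10 = 4 (OGF_INFO_PARAM) and ocf = 0x1001 & 0x03ff = 1.
 * hci_sock_sendmsg() below then evaluates hci_test_bit(1, &ocf_mask[4]);
 * bit 1 of 0x000002be is set, so unprivileged sockets may send this
 * command.
 */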
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
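#if 0
/* Userspace sketch (not part of the original kernel code): programming
 * the per-socket filter that is_filtered_packet() evaluates. Assumes the
 * BlueZ <bluetooth/hci.h> and <bluetooth/hci_lib.h> headers, whose
 * struct hci_filter and hci_filter_*() helpers mirror the kernel's
 * struct hci_ufilter layout.
 */
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>
#include <sys/socket.h>

static int watch_cmd_complete(int sk)
{
	struct hci_filter flt;

	hci_filter_clear(&flt);
	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);	/* type_mask bit 4 */
	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);	/* event_mask bit 0x0e */

	/* Everything else is dropped by is_filtered_packet() */
	return setsockopt(sk, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
}
#endif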
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
			    bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
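#if 0
/* Userspace sketch (not part of the original kernel code): consuming the
 * frames queued above. The single type byte that hci_send_to_sock() pushes
 * in front of the payload arrives as the first byte of every read.
 * HCI_MAX_FRAME_SIZE comes from the BlueZ <bluetooth/hci.h> header.
 */
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <stdio.h>
#include <unistd.h>

static void dump_one_frame(int sk)
{
	unsigned char buf[HCI_MAX_FRAME_SIZE];
	ssize_t n = read(sk, buf, sizeof(buf));

	if (n > 0)
		printf("pkt_type 0x%02x, payload %zd bytes\n", buf[0], n - 1);
}
#endif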
/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
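#if 0
/* Userspace sketch (not part of the original kernel code): the 6-byte
 * header that hci_send_to_monitor() prepends, as seen by a reader of an
 * HCI_CHANNEL_MONITOR socket. All three fields are little-endian 16-bit
 * values, matching struct hci_mon_hdr.
 */
#include <stdint.h>

struct mon_hdr {
	uint16_t opcode;	/* HCI_MON_* operation */
	uint16_t index;		/* controller id (hdev->id) */
	uint16_t len;		/* payload length after this header */
} __attribute__((packed));
#endif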
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
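#if 0
/* Sketch (illustrative only): how a kernel-side consumer such as mgmt.c
 * might register a channel. The field names follow struct hci_mgmt_chan
 * and struct hci_mgmt_handler as used by hci_mgmt_cmd() below; the empty
 * handler table here is hypothetical.
 */
static const struct hci_mgmt_handler example_handlers[] = {
	/* [opcode] = { .func, .data_len, .flags } entries go here */
};

static struct hci_mgmt_chan example_chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(example_handlers),
	.handlers	= example_handlers,
};

/* err = hci_mgmt_chan_register(&example_chan); */
#endif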
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
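#if 0
/* Userspace sketch (not part of the original kernel code): opening a raw
 * HCI socket, binding it to one controller via the HCI_CHANNEL_RAW path
 * above, and bringing the device up with one of the unbound ioctls from
 * hci_sock_ioctl(). Constants and struct sockaddr_hci come from the
 * BlueZ <bluetooth/hci.h> header.
 */
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

static int open_raw_hci(int dev_id)
{
	struct sockaddr_hci addr;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = dev_id;			/* or HCI_DEV_NONE for all */
	addr.hci_channel = HCI_CHANNEL_RAW;

	if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0)
		return -1;

	/* Requires CAP_NET_ADMIN, matching the check in hci_sock_ioctl() */
	ioctl(sk, HCIDEVUP, dev_id);

	return sk;
}
#endif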
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);

#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
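#if 0
/* Userspace sketch (not part of the original kernel code): the 6-byte
 * management header parsed above, followed by the command parameters.
 * This example sends MGMT_OP_READ_VERSION (0x0001) with no parameters and
 * no controller index; opcode, index and length are all little-endian.
 */
#include <stdint.h>
#include <unistd.h>

static int send_read_version(int mgmt_sk)
{
	/* opcode 0x0001, index 0xffff (MGMT_INDEX_NONE), param len 0 */
	uint8_t cmd[6] = { 0x01, 0x00, 0xff, 0xff, 0x00, 0x00 };

	return write(mgmt_sk, cmd, sizeof(cmd));
}
#endif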
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
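#if 0
/* Userspace sketch (not part of the original kernel code): the frame
 * layout hci_sock_sendmsg() expects on a raw socket - one packet type
 * byte, then the little-endian command opcode and the parameter length.
 * This sends HCI_Read_Local_Version_Information (opcode 0x1001), which
 * the security filter above permits for unprivileged sockets.
 */
#include <stdint.h>
#include <unistd.h>

static int send_read_local_version(int raw_sk)
{
	/* 0x01 = HCI_COMMAND_PKT, opcode 0x1001 (LE), plen 0 */
	uint8_t pkt[4] = { 0x01, 0x01, 0x10, 0x00 };

	return write(raw_sk, pkt, sizeof(pkt));
}
#endif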
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}