/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
/* Count of sockets bound to HCI_CHANNEL_MONITOR; hci_send_to_monitor()
 * bails out early when this is zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
36 /* ----- HCI socket interface ----- */
39 #define hci_pi(sk) ((struct hci_pinfo *) sk)
44 struct hci_filter filter
;
46 unsigned short channel
;
49 static inline int hci_test_bit(int nr
, void *addr
)
51 return *((__u32
*) addr
+ (nr
>> 5)) & ((__u32
) 1 << (nr
& 31));
55 #define HCI_SFLT_MAX_OGF 5
57 struct hci_sec_filter
{
60 __u32 ocf_mask
[HCI_SFLT_MAX_OGF
+ 1][4];
63 static const struct hci_sec_filter hci_sec_filter
= {
67 { 0x1000d9fe, 0x0000b00c },
72 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
74 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
76 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
78 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
79 /* OGF_STATUS_PARAM */
80 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
84 static struct bt_sock_list hci_sk_list
= {
85 .lock
= __RW_LOCK_UNLOCKED(hci_sk_list
.lock
)
88 static bool is_filtered_packet(struct sock
*sk
, struct sk_buff
*skb
)
90 struct hci_filter
*flt
;
91 int flt_type
, flt_event
;
94 flt
= &hci_pi(sk
)->filter
;
96 if (bt_cb(skb
)->pkt_type
== HCI_VENDOR_PKT
)
99 flt_type
= bt_cb(skb
)->pkt_type
& HCI_FLT_TYPE_BITS
;
101 if (!test_bit(flt_type
, &flt
->type_mask
))
104 /* Extra filter for event packets only */
105 if (bt_cb(skb
)->pkt_type
!= HCI_EVENT_PKT
)
108 flt_event
= (*(__u8
*)skb
->data
& HCI_FLT_EVENT_BITS
);
110 if (!hci_test_bit(flt_event
, &flt
->event_mask
))
113 /* Check filter only when opcode is set */
117 if (flt_event
== HCI_EV_CMD_COMPLETE
&&
118 flt
->opcode
!= get_unaligned((__le16
*)(skb
->data
+ 3)))
121 if (flt_event
== HCI_EV_CMD_STATUS
&&
122 flt
->opcode
!= get_unaligned((__le16
*)(skb
->data
+ 4)))
128 /* Send frame to RAW socket */
129 void hci_send_to_sock(struct hci_dev
*hdev
, struct sk_buff
*skb
)
132 struct sk_buff
*skb_copy
= NULL
;
134 BT_DBG("hdev %p len %d", hdev
, skb
->len
);
136 read_lock(&hci_sk_list
.lock
);
138 sk_for_each(sk
, &hci_sk_list
.head
) {
139 struct sk_buff
*nskb
;
141 if (sk
->sk_state
!= BT_BOUND
|| hci_pi(sk
)->hdev
!= hdev
)
144 /* Don't send frame to the socket it came from */
148 if (hci_pi(sk
)->channel
== HCI_CHANNEL_RAW
) {
149 if (is_filtered_packet(sk
, skb
))
151 } else if (hci_pi(sk
)->channel
== HCI_CHANNEL_USER
) {
152 if (!bt_cb(skb
)->incoming
)
154 if (bt_cb(skb
)->pkt_type
!= HCI_EVENT_PKT
&&
155 bt_cb(skb
)->pkt_type
!= HCI_ACLDATA_PKT
&&
156 bt_cb(skb
)->pkt_type
!= HCI_SCODATA_PKT
)
159 /* Don't send frame to other channel types */
164 /* Create a private copy with headroom */
165 skb_copy
= __pskb_copy_fclone(skb
, 1, GFP_ATOMIC
, true);
169 /* Put type byte before the data */
170 memcpy(skb_push(skb_copy
, 1), &bt_cb(skb
)->pkt_type
, 1);
173 nskb
= skb_clone(skb_copy
, GFP_ATOMIC
);
177 if (sock_queue_rcv_skb(sk
, nskb
))
181 read_unlock(&hci_sk_list
.lock
);
186 /* Send frame to control socket */
187 void hci_send_to_control(struct sk_buff
*skb
, struct sock
*skip_sk
)
191 BT_DBG("len %d", skb
->len
);
193 read_lock(&hci_sk_list
.lock
);
195 sk_for_each(sk
, &hci_sk_list
.head
) {
196 struct sk_buff
*nskb
;
198 /* Skip the original socket */
202 if (sk
->sk_state
!= BT_BOUND
)
205 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_CONTROL
)
208 nskb
= skb_clone(skb
, GFP_ATOMIC
);
212 if (sock_queue_rcv_skb(sk
, nskb
))
216 read_unlock(&hci_sk_list
.lock
);
219 /* Send frame to monitor socket */
220 void hci_send_to_monitor(struct hci_dev
*hdev
, struct sk_buff
*skb
)
223 struct sk_buff
*skb_copy
= NULL
;
226 if (!atomic_read(&monitor_promisc
))
229 BT_DBG("hdev %p len %d", hdev
, skb
->len
);
231 switch (bt_cb(skb
)->pkt_type
) {
232 case HCI_COMMAND_PKT
:
233 opcode
= cpu_to_le16(HCI_MON_COMMAND_PKT
);
236 opcode
= cpu_to_le16(HCI_MON_EVENT_PKT
);
238 case HCI_ACLDATA_PKT
:
239 if (bt_cb(skb
)->incoming
)
240 opcode
= cpu_to_le16(HCI_MON_ACL_RX_PKT
);
242 opcode
= cpu_to_le16(HCI_MON_ACL_TX_PKT
);
244 case HCI_SCODATA_PKT
:
245 if (bt_cb(skb
)->incoming
)
246 opcode
= cpu_to_le16(HCI_MON_SCO_RX_PKT
);
248 opcode
= cpu_to_le16(HCI_MON_SCO_TX_PKT
);
254 read_lock(&hci_sk_list
.lock
);
256 sk_for_each(sk
, &hci_sk_list
.head
) {
257 struct sk_buff
*nskb
;
259 if (sk
->sk_state
!= BT_BOUND
)
262 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_MONITOR
)
266 struct hci_mon_hdr
*hdr
;
268 /* Create a private copy with headroom */
269 skb_copy
= __pskb_copy_fclone(skb
, HCI_MON_HDR_SIZE
,
274 /* Put header before the data */
275 hdr
= (void *) skb_push(skb_copy
, HCI_MON_HDR_SIZE
);
276 hdr
->opcode
= opcode
;
277 hdr
->index
= cpu_to_le16(hdev
->id
);
278 hdr
->len
= cpu_to_le16(skb
->len
);
281 nskb
= skb_clone(skb_copy
, GFP_ATOMIC
);
285 if (sock_queue_rcv_skb(sk
, nskb
))
289 read_unlock(&hci_sk_list
.lock
);
294 static void send_monitor_event(struct sk_buff
*skb
)
298 BT_DBG("len %d", skb
->len
);
300 read_lock(&hci_sk_list
.lock
);
302 sk_for_each(sk
, &hci_sk_list
.head
) {
303 struct sk_buff
*nskb
;
305 if (sk
->sk_state
!= BT_BOUND
)
308 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_MONITOR
)
311 nskb
= skb_clone(skb
, GFP_ATOMIC
);
315 if (sock_queue_rcv_skb(sk
, nskb
))
319 read_unlock(&hci_sk_list
.lock
);
322 static struct sk_buff
*create_monitor_event(struct hci_dev
*hdev
, int event
)
324 struct hci_mon_hdr
*hdr
;
325 struct hci_mon_new_index
*ni
;
331 skb
= bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE
, GFP_ATOMIC
);
335 ni
= (void *) skb_put(skb
, HCI_MON_NEW_INDEX_SIZE
);
336 ni
->type
= hdev
->dev_type
;
338 bacpy(&ni
->bdaddr
, &hdev
->bdaddr
);
339 memcpy(ni
->name
, hdev
->name
, 8);
341 opcode
= cpu_to_le16(HCI_MON_NEW_INDEX
);
345 skb
= bt_skb_alloc(0, GFP_ATOMIC
);
349 opcode
= cpu_to_le16(HCI_MON_DEL_INDEX
);
356 __net_timestamp(skb
);
358 hdr
= (void *) skb_push(skb
, HCI_MON_HDR_SIZE
);
359 hdr
->opcode
= opcode
;
360 hdr
->index
= cpu_to_le16(hdev
->id
);
361 hdr
->len
= cpu_to_le16(skb
->len
- HCI_MON_HDR_SIZE
);
366 static void send_monitor_replay(struct sock
*sk
)
368 struct hci_dev
*hdev
;
370 read_lock(&hci_dev_list_lock
);
372 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
375 skb
= create_monitor_event(hdev
, HCI_DEV_REG
);
379 if (sock_queue_rcv_skb(sk
, skb
))
383 read_unlock(&hci_dev_list_lock
);
386 /* Generate internal stack event */
387 static void hci_si_event(struct hci_dev
*hdev
, int type
, int dlen
, void *data
)
389 struct hci_event_hdr
*hdr
;
390 struct hci_ev_stack_internal
*ev
;
393 skb
= bt_skb_alloc(HCI_EVENT_HDR_SIZE
+ sizeof(*ev
) + dlen
, GFP_ATOMIC
);
397 hdr
= (void *) skb_put(skb
, HCI_EVENT_HDR_SIZE
);
398 hdr
->evt
= HCI_EV_STACK_INTERNAL
;
399 hdr
->plen
= sizeof(*ev
) + dlen
;
401 ev
= (void *) skb_put(skb
, sizeof(*ev
) + dlen
);
403 memcpy(ev
->data
, data
, dlen
);
405 bt_cb(skb
)->incoming
= 1;
406 __net_timestamp(skb
);
408 bt_cb(skb
)->pkt_type
= HCI_EVENT_PKT
;
409 hci_send_to_sock(hdev
, skb
);
413 void hci_sock_dev_event(struct hci_dev
*hdev
, int event
)
415 struct hci_ev_si_device ev
;
417 BT_DBG("hdev %s event %d", hdev
->name
, event
);
419 /* Send event to monitor */
420 if (atomic_read(&monitor_promisc
)) {
423 skb
= create_monitor_event(hdev
, event
);
425 send_monitor_event(skb
);
430 /* Send event to sockets */
432 ev
.dev_id
= hdev
->id
;
433 hci_si_event(NULL
, HCI_EV_SI_DEVICE
, sizeof(ev
), &ev
);
435 if (event
== HCI_DEV_UNREG
) {
438 /* Detach sockets from device */
439 read_lock(&hci_sk_list
.lock
);
440 sk_for_each(sk
, &hci_sk_list
.head
) {
441 bh_lock_sock_nested(sk
);
442 if (hci_pi(sk
)->hdev
== hdev
) {
443 hci_pi(sk
)->hdev
= NULL
;
445 sk
->sk_state
= BT_OPEN
;
446 sk
->sk_state_change(sk
);
452 read_unlock(&hci_sk_list
.lock
);
456 static int hci_sock_release(struct socket
*sock
)
458 struct sock
*sk
= sock
->sk
;
459 struct hci_dev
*hdev
;
461 BT_DBG("sock %p sk %p", sock
, sk
);
466 hdev
= hci_pi(sk
)->hdev
;
468 if (hci_pi(sk
)->channel
== HCI_CHANNEL_MONITOR
)
469 atomic_dec(&monitor_promisc
);
471 bt_sock_unlink(&hci_sk_list
, sk
);
474 if (hci_pi(sk
)->channel
== HCI_CHANNEL_USER
) {
475 mgmt_index_added(hdev
);
476 clear_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
);
477 hci_dev_close(hdev
->id
);
480 atomic_dec(&hdev
->promisc
);
486 skb_queue_purge(&sk
->sk_receive_queue
);
487 skb_queue_purge(&sk
->sk_write_queue
);
493 static int hci_sock_blacklist_add(struct hci_dev
*hdev
, void __user
*arg
)
498 if (copy_from_user(&bdaddr
, arg
, sizeof(bdaddr
)))
503 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &bdaddr
, BDADDR_BREDR
);
505 hci_dev_unlock(hdev
);
510 static int hci_sock_blacklist_del(struct hci_dev
*hdev
, void __user
*arg
)
515 if (copy_from_user(&bdaddr
, arg
, sizeof(bdaddr
)))
520 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &bdaddr
, BDADDR_BREDR
);
522 hci_dev_unlock(hdev
);
527 /* Ioctls that require bound socket */
528 static int hci_sock_bound_ioctl(struct sock
*sk
, unsigned int cmd
,
531 struct hci_dev
*hdev
= hci_pi(sk
)->hdev
;
536 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
539 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
542 if (hdev
->dev_type
!= HCI_BREDR
)
547 if (!capable(CAP_NET_ADMIN
))
552 return hci_get_conn_info(hdev
, (void __user
*) arg
);
555 return hci_get_auth_info(hdev
, (void __user
*) arg
);
558 if (!capable(CAP_NET_ADMIN
))
560 return hci_sock_blacklist_add(hdev
, (void __user
*) arg
);
563 if (!capable(CAP_NET_ADMIN
))
565 return hci_sock_blacklist_del(hdev
, (void __user
*) arg
);
571 static int hci_sock_ioctl(struct socket
*sock
, unsigned int cmd
,
574 void __user
*argp
= (void __user
*) arg
;
575 struct sock
*sk
= sock
->sk
;
578 BT_DBG("cmd %x arg %lx", cmd
, arg
);
582 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
591 return hci_get_dev_list(argp
);
594 return hci_get_dev_info(argp
);
597 return hci_get_conn_list(argp
);
600 if (!capable(CAP_NET_ADMIN
))
602 return hci_dev_open(arg
);
605 if (!capable(CAP_NET_ADMIN
))
607 return hci_dev_close(arg
);
610 if (!capable(CAP_NET_ADMIN
))
612 return hci_dev_reset(arg
);
615 if (!capable(CAP_NET_ADMIN
))
617 return hci_dev_reset_stat(arg
);
627 if (!capable(CAP_NET_ADMIN
))
629 return hci_dev_cmd(cmd
, argp
);
632 return hci_inquiry(argp
);
637 err
= hci_sock_bound_ioctl(sk
, cmd
, arg
);
644 static int hci_sock_bind(struct socket
*sock
, struct sockaddr
*addr
,
647 struct sockaddr_hci haddr
;
648 struct sock
*sk
= sock
->sk
;
649 struct hci_dev
*hdev
= NULL
;
652 BT_DBG("sock %p sk %p", sock
, sk
);
657 memset(&haddr
, 0, sizeof(haddr
));
658 len
= min_t(unsigned int, sizeof(haddr
), addr_len
);
659 memcpy(&haddr
, addr
, len
);
661 if (haddr
.hci_family
!= AF_BLUETOOTH
)
666 if (sk
->sk_state
== BT_BOUND
) {
671 switch (haddr
.hci_channel
) {
672 case HCI_CHANNEL_RAW
:
673 if (hci_pi(sk
)->hdev
) {
678 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
679 hdev
= hci_dev_get(haddr
.hci_dev
);
685 atomic_inc(&hdev
->promisc
);
688 hci_pi(sk
)->hdev
= hdev
;
691 case HCI_CHANNEL_USER
:
692 if (hci_pi(sk
)->hdev
) {
697 if (haddr
.hci_dev
== HCI_DEV_NONE
) {
702 if (!capable(CAP_NET_ADMIN
)) {
707 hdev
= hci_dev_get(haddr
.hci_dev
);
713 if (test_bit(HCI_UP
, &hdev
->flags
) ||
714 test_bit(HCI_INIT
, &hdev
->flags
) ||
715 test_bit(HCI_SETUP
, &hdev
->dev_flags
) ||
716 test_bit(HCI_CONFIG
, &hdev
->dev_flags
)) {
722 if (test_and_set_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
728 mgmt_index_removed(hdev
);
730 err
= hci_dev_open(hdev
->id
);
732 clear_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
);
733 mgmt_index_added(hdev
);
738 atomic_inc(&hdev
->promisc
);
740 hci_pi(sk
)->hdev
= hdev
;
743 case HCI_CHANNEL_CONTROL
:
744 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
749 if (!capable(CAP_NET_ADMIN
)) {
756 case HCI_CHANNEL_MONITOR
:
757 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
762 if (!capable(CAP_NET_RAW
)) {
767 send_monitor_replay(sk
);
769 atomic_inc(&monitor_promisc
);
778 hci_pi(sk
)->channel
= haddr
.hci_channel
;
779 sk
->sk_state
= BT_BOUND
;
786 static int hci_sock_getname(struct socket
*sock
, struct sockaddr
*addr
,
787 int *addr_len
, int peer
)
789 struct sockaddr_hci
*haddr
= (struct sockaddr_hci
*) addr
;
790 struct sock
*sk
= sock
->sk
;
791 struct hci_dev
*hdev
;
794 BT_DBG("sock %p sk %p", sock
, sk
);
801 hdev
= hci_pi(sk
)->hdev
;
807 *addr_len
= sizeof(*haddr
);
808 haddr
->hci_family
= AF_BLUETOOTH
;
809 haddr
->hci_dev
= hdev
->id
;
810 haddr
->hci_channel
= hci_pi(sk
)->channel
;
817 static void hci_sock_cmsg(struct sock
*sk
, struct msghdr
*msg
,
820 __u32 mask
= hci_pi(sk
)->cmsg_mask
;
822 if (mask
& HCI_CMSG_DIR
) {
823 int incoming
= bt_cb(skb
)->incoming
;
824 put_cmsg(msg
, SOL_HCI
, HCI_CMSG_DIR
, sizeof(incoming
),
828 if (mask
& HCI_CMSG_TSTAMP
) {
830 struct compat_timeval ctv
;
836 skb_get_timestamp(skb
, &tv
);
841 if (!COMPAT_USE_64BIT_TIME
&&
842 (msg
->msg_flags
& MSG_CMSG_COMPAT
)) {
843 ctv
.tv_sec
= tv
.tv_sec
;
844 ctv
.tv_usec
= tv
.tv_usec
;
850 put_cmsg(msg
, SOL_HCI
, HCI_CMSG_TSTAMP
, len
, data
);
854 static int hci_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
,
855 struct msghdr
*msg
, size_t len
, int flags
)
857 int noblock
= flags
& MSG_DONTWAIT
;
858 struct sock
*sk
= sock
->sk
;
862 BT_DBG("sock %p, sk %p", sock
, sk
);
864 if (flags
& (MSG_OOB
))
867 if (sk
->sk_state
== BT_CLOSED
)
870 skb
= skb_recv_datagram(sk
, flags
, noblock
, &err
);
876 msg
->msg_flags
|= MSG_TRUNC
;
880 skb_reset_transport_header(skb
);
881 err
= skb_copy_datagram_iovec(skb
, 0, msg
->msg_iov
, copied
);
883 switch (hci_pi(sk
)->channel
) {
884 case HCI_CHANNEL_RAW
:
885 hci_sock_cmsg(sk
, msg
, skb
);
887 case HCI_CHANNEL_USER
:
888 case HCI_CHANNEL_CONTROL
:
889 case HCI_CHANNEL_MONITOR
:
890 sock_recv_timestamp(msg
, sk
, skb
);
894 skb_free_datagram(sk
, skb
);
896 return err
? : copied
;
899 static int hci_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
,
900 struct msghdr
*msg
, size_t len
)
902 struct sock
*sk
= sock
->sk
;
903 struct hci_dev
*hdev
;
907 BT_DBG("sock %p sk %p", sock
, sk
);
909 if (msg
->msg_flags
& MSG_OOB
)
912 if (msg
->msg_flags
& ~(MSG_DONTWAIT
|MSG_NOSIGNAL
|MSG_ERRQUEUE
))
915 if (len
< 4 || len
> HCI_MAX_FRAME_SIZE
)
920 switch (hci_pi(sk
)->channel
) {
921 case HCI_CHANNEL_RAW
:
922 case HCI_CHANNEL_USER
:
924 case HCI_CHANNEL_CONTROL
:
925 err
= mgmt_control(sk
, msg
, len
);
927 case HCI_CHANNEL_MONITOR
:
935 hdev
= hci_pi(sk
)->hdev
;
941 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
946 skb
= bt_skb_send_alloc(sk
, len
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
950 if (memcpy_fromiovec(skb_put(skb
, len
), msg
->msg_iov
, len
)) {
955 bt_cb(skb
)->pkt_type
= *((unsigned char *) skb
->data
);
958 if (hci_pi(sk
)->channel
== HCI_CHANNEL_USER
) {
959 /* No permission check is needed for user channel
960 * since that gets enforced when binding the socket.
962 * However check that the packet type is valid.
964 if (bt_cb(skb
)->pkt_type
!= HCI_COMMAND_PKT
&&
965 bt_cb(skb
)->pkt_type
!= HCI_ACLDATA_PKT
&&
966 bt_cb(skb
)->pkt_type
!= HCI_SCODATA_PKT
) {
971 skb_queue_tail(&hdev
->raw_q
, skb
);
972 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
973 } else if (bt_cb(skb
)->pkt_type
== HCI_COMMAND_PKT
) {
974 u16 opcode
= get_unaligned_le16(skb
->data
);
975 u16 ogf
= hci_opcode_ogf(opcode
);
976 u16 ocf
= hci_opcode_ocf(opcode
);
978 if (((ogf
> HCI_SFLT_MAX_OGF
) ||
979 !hci_test_bit(ocf
& HCI_FLT_OCF_BITS
,
980 &hci_sec_filter
.ocf_mask
[ogf
])) &&
981 !capable(CAP_NET_RAW
)) {
987 skb_queue_tail(&hdev
->raw_q
, skb
);
988 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
990 /* Stand-alone HCI commands must be flaged as
991 * single-command requests.
993 bt_cb(skb
)->req
.start
= true;
995 skb_queue_tail(&hdev
->cmd_q
, skb
);
996 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
999 if (!capable(CAP_NET_RAW
)) {
1004 skb_queue_tail(&hdev
->raw_q
, skb
);
1005 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
1019 static int hci_sock_setsockopt(struct socket
*sock
, int level
, int optname
,
1020 char __user
*optval
, unsigned int len
)
1022 struct hci_ufilter uf
= { .opcode
= 0 };
1023 struct sock
*sk
= sock
->sk
;
1024 int err
= 0, opt
= 0;
1026 BT_DBG("sk %p, opt %d", sk
, optname
);
1030 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
1037 if (get_user(opt
, (int __user
*)optval
)) {
1043 hci_pi(sk
)->cmsg_mask
|= HCI_CMSG_DIR
;
1045 hci_pi(sk
)->cmsg_mask
&= ~HCI_CMSG_DIR
;
1048 case HCI_TIME_STAMP
:
1049 if (get_user(opt
, (int __user
*)optval
)) {
1055 hci_pi(sk
)->cmsg_mask
|= HCI_CMSG_TSTAMP
;
1057 hci_pi(sk
)->cmsg_mask
&= ~HCI_CMSG_TSTAMP
;
1062 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1064 uf
.type_mask
= f
->type_mask
;
1065 uf
.opcode
= f
->opcode
;
1066 uf
.event_mask
[0] = *((u32
*) f
->event_mask
+ 0);
1067 uf
.event_mask
[1] = *((u32
*) f
->event_mask
+ 1);
1070 len
= min_t(unsigned int, len
, sizeof(uf
));
1071 if (copy_from_user(&uf
, optval
, len
)) {
1076 if (!capable(CAP_NET_RAW
)) {
1077 uf
.type_mask
&= hci_sec_filter
.type_mask
;
1078 uf
.event_mask
[0] &= *((u32
*) hci_sec_filter
.event_mask
+ 0);
1079 uf
.event_mask
[1] &= *((u32
*) hci_sec_filter
.event_mask
+ 1);
1083 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1085 f
->type_mask
= uf
.type_mask
;
1086 f
->opcode
= uf
.opcode
;
1087 *((u32
*) f
->event_mask
+ 0) = uf
.event_mask
[0];
1088 *((u32
*) f
->event_mask
+ 1) = uf
.event_mask
[1];
1102 static int hci_sock_getsockopt(struct socket
*sock
, int level
, int optname
,
1103 char __user
*optval
, int __user
*optlen
)
1105 struct hci_ufilter uf
;
1106 struct sock
*sk
= sock
->sk
;
1107 int len
, opt
, err
= 0;
1109 BT_DBG("sk %p, opt %d", sk
, optname
);
1111 if (get_user(len
, optlen
))
1116 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
1123 if (hci_pi(sk
)->cmsg_mask
& HCI_CMSG_DIR
)
1128 if (put_user(opt
, optval
))
1132 case HCI_TIME_STAMP
:
1133 if (hci_pi(sk
)->cmsg_mask
& HCI_CMSG_TSTAMP
)
1138 if (put_user(opt
, optval
))
1144 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1146 memset(&uf
, 0, sizeof(uf
));
1147 uf
.type_mask
= f
->type_mask
;
1148 uf
.opcode
= f
->opcode
;
1149 uf
.event_mask
[0] = *((u32
*) f
->event_mask
+ 0);
1150 uf
.event_mask
[1] = *((u32
*) f
->event_mask
+ 1);
1153 len
= min_t(unsigned int, len
, sizeof(uf
));
1154 if (copy_to_user(optval
, &uf
, len
))
1168 static const struct proto_ops hci_sock_ops
= {
1169 .family
= PF_BLUETOOTH
,
1170 .owner
= THIS_MODULE
,
1171 .release
= hci_sock_release
,
1172 .bind
= hci_sock_bind
,
1173 .getname
= hci_sock_getname
,
1174 .sendmsg
= hci_sock_sendmsg
,
1175 .recvmsg
= hci_sock_recvmsg
,
1176 .ioctl
= hci_sock_ioctl
,
1177 .poll
= datagram_poll
,
1178 .listen
= sock_no_listen
,
1179 .shutdown
= sock_no_shutdown
,
1180 .setsockopt
= hci_sock_setsockopt
,
1181 .getsockopt
= hci_sock_getsockopt
,
1182 .connect
= sock_no_connect
,
1183 .socketpair
= sock_no_socketpair
,
1184 .accept
= sock_no_accept
,
1185 .mmap
= sock_no_mmap
1188 static struct proto hci_sk_proto
= {
1190 .owner
= THIS_MODULE
,
1191 .obj_size
= sizeof(struct hci_pinfo
)
1194 static int hci_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
1199 BT_DBG("sock %p", sock
);
1201 if (sock
->type
!= SOCK_RAW
)
1202 return -ESOCKTNOSUPPORT
;
1204 sock
->ops
= &hci_sock_ops
;
1206 sk
= sk_alloc(net
, PF_BLUETOOTH
, GFP_ATOMIC
, &hci_sk_proto
);
1210 sock_init_data(sock
, sk
);
1212 sock_reset_flag(sk
, SOCK_ZAPPED
);
1214 sk
->sk_protocol
= protocol
;
1216 sock
->state
= SS_UNCONNECTED
;
1217 sk
->sk_state
= BT_OPEN
;
1219 bt_sock_link(&hci_sk_list
, sk
);
1223 static const struct net_proto_family hci_sock_family_ops
= {
1224 .family
= PF_BLUETOOTH
,
1225 .owner
= THIS_MODULE
,
1226 .create
= hci_sock_create
,
1229 int __init
hci_sock_init(void)
1233 err
= proto_register(&hci_sk_proto
, 0);
1237 err
= bt_sock_register(BTPROTO_HCI
, &hci_sock_family_ops
);
1239 BT_ERR("HCI socket registration failed");
1243 err
= bt_procfs_init(&init_net
, "hci", &hci_sk_list
, NULL
);
1245 BT_ERR("Failed to create HCI proc file");
1246 bt_sock_unregister(BTPROTO_HCI
);
1250 BT_INFO("HCI socket layer initialized");
1255 proto_unregister(&hci_sk_proto
);
1259 void hci_sock_cleanup(void)
1261 bt_procfs_cleanup(&init_net
, "hci");
1262 bt_sock_unregister(BTPROTO_HCI
);
1263 proto_unregister(&hci_sk_proto
);