/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

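/* A worked example of the indexing above (an illustration, not part of
 * the original file): for the Command Complete event,
 * nr = HCI_EV_CMD_COMPLETE = 0x0e, so nr >> 5 selects __u32 word 0 and
 * nr & 31 selects bit 14; hci_test_bit() thus reports bit 14 of
 * event_mask[0].
 */
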
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

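/* How the table above is consumed (a worked example, not in the original
 * source): hci_sock_sendmsg() splits each command opcode into OGF and OCF
 * and checks hci_test_bit(ocf & HCI_FLT_OCF_BITS,
 * &hci_sec_filter.ocf_mask[ogf]). HCI_Read_Local_Version_Information has
 * OGF 0x04 (OGF_INFO_PARAM) and OCF 0x0001; ocf_mask[4][0] is 0x000002be,
 * whose bit 1 is set, so an unprivileged raw socket may send it. Commands
 * whose bit is clear require CAP_NET_RAW.
 */
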
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

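/* Userspace side of the monitor channel, as a minimal sketch (assumes the
 * BlueZ <bluetooth/bluetooth.h> and <bluetooth/hci.h> headers plus the
 * HCI_CHANNEL_MONITOR and hci_mon_hdr definitions mirrored from this file;
 * CAP_NET_RAW is required, see hci_sock_bind() below):
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *	unsigned char buf[HCI_MAX_FRAME_SIZE];
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (dd >= 0 && bind(dd, (struct sockaddr *) &addr, sizeof(addr)) == 0)
 *		read(dd, buf, sizeof(buf));
 *
 * Each frame read this way starts with the struct hci_mon_hdr written
 * above: opcode, index and len, all little endian.
 */
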
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

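/* Registration sketch, modelled on how mgmt.c wires up HCI_CHANNEL_CONTROL
 * (the field names follow their use in hci_mgmt_cmd() below; the handler
 * table contents are hypothetical, and index 0 is left unused because
 * opcode 0x0000 is reserved):
 *
 *	static const struct hci_mgmt_handler example_handlers[] = {
 *		{ NULL },
 *		{ read_version, MGMT_READ_VERSION_SIZE,
 *		  HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED },
 *	};
 *
 *	static struct hci_mgmt_chan example_chan = {
 *		.channel       = HCI_CHANNEL_CONTROL,
 *		.handler_count = ARRAY_SIZE(example_handlers),
 *		.handlers      = example_handlers,
 *	};
 *
 *	err = hci_mgmt_chan_register(&example_chan);
 */
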
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			hci_dev_close(hdev->id);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

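/* Typical ioctl usage from userspace (an illustrative sketch assuming the
 * BlueZ <bluetooth/hci.h> header, with error handling elided). HCIDEVUP
 * and HCIDEVDOWN take a device index, HCIGETDEVINFO a struct hci_dev_info
 * with dev_id filled in:
 *
 *	struct hci_dev_info di = { .dev_id = 0 };
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(ctl, HCIDEVUP, 0);
 *	ioctl(ctl, HCIGETDEVINFO, (unsigned long) &di);
 *
 * HCIDEVUP needs CAP_NET_ADMIN, as checked above; HCIGETDEVINFO does not.
 */
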
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel while the HCI_AUTO_OFF grace
				 * period is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name, etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

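/* Minimal bind sequence for a raw socket on hci0, seen from userspace
 * (a sketch assuming the BlueZ <bluetooth/bluetooth.h> and
 * <bluetooth/hci.h> headers):
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (dd < 0 || bind(dd, (struct sockaddr *) &addr, sizeof(addr)) < 0)
 *		return -1;
 *
 * Binding HCI_CHANNEL_USER instead gives exclusive access to the device
 * and requires CAP_NET_ADMIN, as enforced above.
 */
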
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

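/* Reading the direction ancillary data on a raw socket (a sketch; assumes
 * the HCI_DATA_DIR option was enabled via setsockopt() so that
 * hci_sock_cmsg() above attaches HCI_CMSG_DIR):
 *
 *	unsigned char buf[HCI_MAX_FRAME_SIZE], cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov        = &iov,
 *		.msg_iovlen     = 1,
 *		.msg_control    = cbuf,
 *		.msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *	int dir = -1;
 *
 *	recvmsg(dd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_HCI &&
 *		    cmsg->cmsg_type == HCI_CMSG_DIR)
 *			memcpy(&dir, CMSG_DATA(cmsg), sizeof(dir));
 *	}
 *
 * dir is then 1 for incoming and 0 for outgoing frames.
 */
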
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

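/* Wire format consumed above, as a worked example using values from
 * <net/bluetooth/mgmt.h>: every management command starts with
 * struct mgmt_hdr { __le16 opcode; __le16 index; __le16 len; }.
 * MGMT_OP_READ_VERSION (0x0001) with no parameters and no controller
 * selected is therefore the six bytes
 *
 *	01 00 ff ff 00 00
 *
 * opcode 0x0001, index MGMT_INDEX_NONE (0xffff) and len 0, all little
 * endian, written to a socket bound to HCI_CHANNEL_CONTROL.
 */
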
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

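/* On-the-wire layout expected by this function, as a worked example: the
 * first byte is the packet type, followed by the HCI packet itself.
 * Sending HCI_Reset (OGF 0x03, OCF 0x0003, hence opcode 0x0c03) on a raw
 * socket is the four bytes
 *
 *	01 03 0c 00
 *
 * HCI_COMMAND_PKT, the opcode in little endian, and a parameter length of
 * 0. This is also why len < 4 is rejected above: type + opcode + plen is
 * the minimum command frame.
 */
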
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

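/* Setting a filter from userspace (a sketch; struct hci_ufilter matches
 * the layout copied above: type_mask, event_mask[2], opcode). This
 * requests only Command Complete events:
 *
 *	struct hci_ufilter uf = { 0 };
 *
 *	uf.type_mask     = 1 << HCI_EVENT_PKT;
 *	uf.event_mask[0] = 1 << HCI_EV_CMD_COMPLETE;
 *	setsockopt(dd, SOL_HCI, HCI_FILTER, &uf, sizeof(uf));
 *
 * Without CAP_NET_RAW the request is silently intersected with
 * hci_sec_filter, as done above.
 */
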
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}