/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

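/* Test a bit in a filter mask stored as an array of __u32 words:
 * the word index is nr >> 5, the bit within that word is nr & 31.
 */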
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

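/* Whitelist of packet types, events and commands available to
 * HCI_CHANNEL_RAW sockets that lack CAP_NET_RAW. Each ocf_mask row is
 * a 128-bit bitmap of permitted OCFs for one OGF.
 */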
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

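/* Returns true when the socket's filter says this packet must not be
 * delivered to it.
 */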
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
			    bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

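/* Build the HCI_MON_* control packet that describes a device event for
 * the monitor channel, or return NULL when the event is not reported.
 */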
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

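/* Replay the register/open/up state of all existing controllers to a
 * freshly bound monitor socket.
 */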
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

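/* Forward a device state change to monitor sockets and, for the
 * register/unregister/up/down transitions, to raw sockets as a
 * stack-internal event. On unregister, detach all sockets still
 * bound to the device.
 */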
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

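/* Helpers for the HCIBLOCKADDR and HCIUNBLOCKADDR ioctls: add or remove
 * a BR/EDR address on the per-device blacklist.
 */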
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

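/* Bind associates the socket with one of the HCI channels: raw device
 * access, exclusive user channel access, the read-only monitor, or a
 * registered management channel.
 */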
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

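/* Attach the ancillary data requested via the socket's cmsg_mask:
 * packet direction (HCI_CMSG_DIR) and receive timestamp
 * (HCI_CMSG_TSTAMP).
 */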
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

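/* Parse and dispatch one management command. The message starts with a
 * struct mgmt_hdr (opcode, controller index, parameter length), followed
 * by len bytes of parameters.
 */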
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

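/* sendmsg routes the frame according to the bound channel: management
 * channels parse it as a mgmt command, the user channel passes it to the
 * driver after a packet type check only, and raw sockets are subject to
 * the security filter unless the sender has CAP_NET_RAW.
 */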
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}