/* net/bluetooth/hci_sock.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */
38 static inline int hci_test_bit(int nr, void *addr)
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
43 /* Security filter */
44 static struct hci_sec_filter hci_sec_filter = {
45 /* Packet types */
46 0x10,
47 /* Events */
48 { 0x1000d9fe, 0x0000b00c },
49 /* Commands */
51 { 0x0 },
52 /* OGF_LINK_CTL */
53 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
54 /* OGF_LINK_POLICY */
55 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
56 /* OGF_HOST_CTL */
57 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
58 /* OGF_INFO_PARAM */
59 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
60 /* OGF_STATUS_PARAM */
61 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
65 static struct bt_sock_list hci_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
69 /* Send frame to RAW socket */
70 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
72 struct sock *sk;
73 struct sk_buff *skb_copy = NULL;
75 BT_DBG("hdev %p len %d", hdev, skb->len);
77 read_lock(&hci_sk_list.lock);
79 sk_for_each(sk, &hci_sk_list.head) {
80 struct hci_filter *flt;
81 struct sk_buff *nskb;
83 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
84 continue;
86 /* Don't send frame to the socket it came from */
87 if (skb->sk == sk)
88 continue;
90 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
91 continue;
93 /* Apply filter */
94 flt = &hci_pi(sk)->filter;
96 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
97 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
98 &flt->type_mask))
99 continue;
101 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
102 int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
104 if (!hci_test_bit(evt, &flt->event_mask))
105 continue;
107 if (flt->opcode &&
108 ((evt == HCI_EV_CMD_COMPLETE &&
109 flt->opcode !=
110 get_unaligned((__le16 *)(skb->data + 3))) ||
111 (evt == HCI_EV_CMD_STATUS &&
112 flt->opcode !=
113 get_unaligned((__le16 *)(skb->data + 4)))))
114 continue;
117 if (!skb_copy) {
118 /* Create a private copy with headroom */
119 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
120 if (!skb_copy)
121 continue;
123 /* Put type byte before the data */
124 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
127 nskb = skb_clone(skb_copy, GFP_ATOMIC);
128 if (!nskb)
129 continue;
131 if (sock_queue_rcv_skb(sk, nskb))
132 kfree_skb(nskb);
135 read_unlock(&hci_sk_list.lock);
137 kfree_skb(skb_copy);
140 /* Send frame to control socket */
141 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
143 struct sock *sk;
145 BT_DBG("len %d", skb->len);
147 read_lock(&hci_sk_list.lock);
149 sk_for_each(sk, &hci_sk_list.head) {
150 struct sk_buff *nskb;
152 /* Skip the original socket */
153 if (sk == skip_sk)
154 continue;
156 if (sk->sk_state != BT_BOUND)
157 continue;
159 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
160 continue;
162 nskb = skb_clone(skb, GFP_ATOMIC);
163 if (!nskb)
164 continue;
166 if (sock_queue_rcv_skb(sk, nskb))
167 kfree_skb(nskb);
170 read_unlock(&hci_sk_list.lock);
173 /* Send frame to monitor socket */
174 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
176 struct sock *sk;
177 struct sk_buff *skb_copy = NULL;
178 __le16 opcode;
180 if (!atomic_read(&monitor_promisc))
181 return;
183 BT_DBG("hdev %p len %d", hdev, skb->len);
185 switch (bt_cb(skb)->pkt_type) {
186 case HCI_COMMAND_PKT:
187 opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
188 break;
189 case HCI_EVENT_PKT:
190 opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
191 break;
192 case HCI_ACLDATA_PKT:
193 if (bt_cb(skb)->incoming)
194 opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
195 else
196 opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
197 break;
198 case HCI_SCODATA_PKT:
199 if (bt_cb(skb)->incoming)
200 opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
201 else
202 opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
203 break;
204 default:
205 return;
208 read_lock(&hci_sk_list.lock);
210 sk_for_each(sk, &hci_sk_list.head) {
211 struct sk_buff *nskb;
213 if (sk->sk_state != BT_BOUND)
214 continue;
216 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
217 continue;
219 if (!skb_copy) {
220 struct hci_mon_hdr *hdr;
222 /* Create a private copy with headroom */
223 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
224 GFP_ATOMIC);
225 if (!skb_copy)
226 continue;
228 /* Put header before the data */
229 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
230 hdr->opcode = opcode;
231 hdr->index = cpu_to_le16(hdev->id);
232 hdr->len = cpu_to_le16(skb->len);
235 nskb = skb_clone(skb_copy, GFP_ATOMIC);
236 if (!nskb)
237 continue;
239 if (sock_queue_rcv_skb(sk, nskb))
240 kfree_skb(nskb);
243 read_unlock(&hci_sk_list.lock);
245 kfree_skb(skb_copy);
248 static void send_monitor_event(struct sk_buff *skb)
250 struct sock *sk;
252 BT_DBG("len %d", skb->len);
254 read_lock(&hci_sk_list.lock);
256 sk_for_each(sk, &hci_sk_list.head) {
257 struct sk_buff *nskb;
259 if (sk->sk_state != BT_BOUND)
260 continue;
262 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
263 continue;
265 nskb = skb_clone(skb, GFP_ATOMIC);
266 if (!nskb)
267 continue;
269 if (sock_queue_rcv_skb(sk, nskb))
270 kfree_skb(nskb);
273 read_unlock(&hci_sk_list.lock);
276 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
278 struct hci_mon_hdr *hdr;
279 struct hci_mon_new_index *ni;
280 struct sk_buff *skb;
281 __le16 opcode;
283 switch (event) {
284 case HCI_DEV_REG:
285 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
286 if (!skb)
287 return NULL;
289 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
290 ni->type = hdev->dev_type;
291 ni->bus = hdev->bus;
292 bacpy(&ni->bdaddr, &hdev->bdaddr);
293 memcpy(ni->name, hdev->name, 8);
295 opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
296 break;
298 case HCI_DEV_UNREG:
299 skb = bt_skb_alloc(0, GFP_ATOMIC);
300 if (!skb)
301 return NULL;
303 opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
304 break;
306 default:
307 return NULL;
310 __net_timestamp(skb);
312 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
313 hdr->opcode = opcode;
314 hdr->index = cpu_to_le16(hdev->id);
315 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
317 return skb;
320 static void send_monitor_replay(struct sock *sk)
322 struct hci_dev *hdev;
324 read_lock(&hci_dev_list_lock);
326 list_for_each_entry(hdev, &hci_dev_list, list) {
327 struct sk_buff *skb;
329 skb = create_monitor_event(hdev, HCI_DEV_REG);
330 if (!skb)
331 continue;
333 if (sock_queue_rcv_skb(sk, skb))
334 kfree_skb(skb);
337 read_unlock(&hci_dev_list_lock);
340 /* Generate internal stack event */
341 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
343 struct hci_event_hdr *hdr;
344 struct hci_ev_stack_internal *ev;
345 struct sk_buff *skb;
347 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
348 if (!skb)
349 return;
351 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
352 hdr->evt = HCI_EV_STACK_INTERNAL;
353 hdr->plen = sizeof(*ev) + dlen;
355 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
356 ev->type = type;
357 memcpy(ev->data, data, dlen);
359 bt_cb(skb)->incoming = 1;
360 __net_timestamp(skb);
362 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
363 skb->dev = (void *) hdev;
364 hci_send_to_sock(hdev, skb);
365 kfree_skb(skb);
368 void hci_sock_dev_event(struct hci_dev *hdev, int event)
370 struct hci_ev_si_device ev;
372 BT_DBG("hdev %s event %d", hdev->name, event);
374 /* Send event to monitor */
375 if (atomic_read(&monitor_promisc)) {
376 struct sk_buff *skb;
378 skb = create_monitor_event(hdev, event);
379 if (skb) {
380 send_monitor_event(skb);
381 kfree_skb(skb);
385 /* Send event to sockets */
386 ev.event = event;
387 ev.dev_id = hdev->id;
388 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
390 if (event == HCI_DEV_UNREG) {
391 struct sock *sk;
393 /* Detach sockets from device */
394 read_lock(&hci_sk_list.lock);
395 sk_for_each(sk, &hci_sk_list.head) {
396 bh_lock_sock_nested(sk);
397 if (hci_pi(sk)->hdev == hdev) {
398 hci_pi(sk)->hdev = NULL;
399 sk->sk_err = EPIPE;
400 sk->sk_state = BT_OPEN;
401 sk->sk_state_change(sk);
403 hci_dev_put(hdev);
405 bh_unlock_sock(sk);
407 read_unlock(&hci_sk_list.lock);
411 static int hci_sock_release(struct socket *sock)
413 struct sock *sk = sock->sk;
414 struct hci_dev *hdev;
416 BT_DBG("sock %p sk %p", sock, sk);
418 if (!sk)
419 return 0;
421 hdev = hci_pi(sk)->hdev;
423 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
424 atomic_dec(&monitor_promisc);
426 bt_sock_unlink(&hci_sk_list, sk);
428 if (hdev) {
429 atomic_dec(&hdev->promisc);
430 hci_dev_put(hdev);
433 sock_orphan(sk);
435 skb_queue_purge(&sk->sk_receive_queue);
436 skb_queue_purge(&sk->sk_write_queue);
438 sock_put(sk);
439 return 0;
442 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
444 bdaddr_t bdaddr;
445 int err;
447 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
448 return -EFAULT;
450 hci_dev_lock(hdev);
452 err = hci_blacklist_add(hdev, &bdaddr, 0);
454 hci_dev_unlock(hdev);
456 return err;
459 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
461 bdaddr_t bdaddr;
462 int err;
464 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
465 return -EFAULT;
467 hci_dev_lock(hdev);
469 err = hci_blacklist_del(hdev, &bdaddr, 0);
471 hci_dev_unlock(hdev);
473 return err;
476 /* Ioctls that require bound socket */
477 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
478 unsigned long arg)
480 struct hci_dev *hdev = hci_pi(sk)->hdev;
482 if (!hdev)
483 return -EBADFD;
485 switch (cmd) {
486 case HCISETRAW:
487 if (!capable(CAP_NET_ADMIN))
488 return -EPERM;
490 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
491 return -EPERM;
493 if (arg)
494 set_bit(HCI_RAW, &hdev->flags);
495 else
496 clear_bit(HCI_RAW, &hdev->flags);
498 return 0;
500 case HCIGETCONNINFO:
501 return hci_get_conn_info(hdev, (void __user *) arg);
503 case HCIGETAUTHINFO:
504 return hci_get_auth_info(hdev, (void __user *) arg);
506 case HCIBLOCKADDR:
507 if (!capable(CAP_NET_ADMIN))
508 return -EPERM;
509 return hci_sock_blacklist_add(hdev, (void __user *) arg);
511 case HCIUNBLOCKADDR:
512 if (!capable(CAP_NET_ADMIN))
513 return -EPERM;
514 return hci_sock_blacklist_del(hdev, (void __user *) arg);
516 default:
517 if (hdev->ioctl)
518 return hdev->ioctl(hdev, cmd, arg);
519 return -EINVAL;
523 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
524 unsigned long arg)
526 struct sock *sk = sock->sk;
527 void __user *argp = (void __user *) arg;
528 int err;
530 BT_DBG("cmd %x arg %lx", cmd, arg);
532 switch (cmd) {
533 case HCIGETDEVLIST:
534 return hci_get_dev_list(argp);
536 case HCIGETDEVINFO:
537 return hci_get_dev_info(argp);
539 case HCIGETCONNLIST:
540 return hci_get_conn_list(argp);
542 case HCIDEVUP:
543 if (!capable(CAP_NET_ADMIN))
544 return -EPERM;
545 return hci_dev_open(arg);
547 case HCIDEVDOWN:
548 if (!capable(CAP_NET_ADMIN))
549 return -EPERM;
550 return hci_dev_close(arg);
552 case HCIDEVRESET:
553 if (!capable(CAP_NET_ADMIN))
554 return -EPERM;
555 return hci_dev_reset(arg);
557 case HCIDEVRESTAT:
558 if (!capable(CAP_NET_ADMIN))
559 return -EPERM;
560 return hci_dev_reset_stat(arg);
562 case HCISETSCAN:
563 case HCISETAUTH:
564 case HCISETENCRYPT:
565 case HCISETPTYPE:
566 case HCISETLINKPOL:
567 case HCISETLINKMODE:
568 case HCISETACLMTU:
569 case HCISETSCOMTU:
570 if (!capable(CAP_NET_ADMIN))
571 return -EPERM;
572 return hci_dev_cmd(cmd, argp);
574 case HCIINQUIRY:
575 return hci_inquiry(argp);
577 default:
578 lock_sock(sk);
579 err = hci_sock_bound_ioctl(sk, cmd, arg);
580 release_sock(sk);
581 return err;
585 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
586 int addr_len)
588 struct sockaddr_hci haddr;
589 struct sock *sk = sock->sk;
590 struct hci_dev *hdev = NULL;
591 int len, err = 0;
593 BT_DBG("sock %p sk %p", sock, sk);
595 if (!addr)
596 return -EINVAL;
598 memset(&haddr, 0, sizeof(haddr));
599 len = min_t(unsigned int, sizeof(haddr), addr_len);
600 memcpy(&haddr, addr, len);
602 if (haddr.hci_family != AF_BLUETOOTH)
603 return -EINVAL;
605 lock_sock(sk);
607 if (sk->sk_state == BT_BOUND) {
608 err = -EALREADY;
609 goto done;
612 switch (haddr.hci_channel) {
613 case HCI_CHANNEL_RAW:
614 if (hci_pi(sk)->hdev) {
615 err = -EALREADY;
616 goto done;
619 if (haddr.hci_dev != HCI_DEV_NONE) {
620 hdev = hci_dev_get(haddr.hci_dev);
621 if (!hdev) {
622 err = -ENODEV;
623 goto done;
626 atomic_inc(&hdev->promisc);
629 hci_pi(sk)->hdev = hdev;
630 break;
632 case HCI_CHANNEL_CONTROL:
633 if (haddr.hci_dev != HCI_DEV_NONE) {
634 err = -EINVAL;
635 goto done;
638 if (!capable(CAP_NET_ADMIN)) {
639 err = -EPERM;
640 goto done;
643 break;
645 case HCI_CHANNEL_MONITOR:
646 if (haddr.hci_dev != HCI_DEV_NONE) {
647 err = -EINVAL;
648 goto done;
651 if (!capable(CAP_NET_RAW)) {
652 err = -EPERM;
653 goto done;
656 send_monitor_replay(sk);
658 atomic_inc(&monitor_promisc);
659 break;
661 default:
662 err = -EINVAL;
663 goto done;
667 hci_pi(sk)->channel = haddr.hci_channel;
668 sk->sk_state = BT_BOUND;
670 done:
671 release_sock(sk);
672 return err;
675 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
676 int *addr_len, int peer)
678 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
679 struct sock *sk = sock->sk;
680 struct hci_dev *hdev = hci_pi(sk)->hdev;
682 BT_DBG("sock %p sk %p", sock, sk);
684 if (!hdev)
685 return -EBADFD;
687 lock_sock(sk);
689 *addr_len = sizeof(*haddr);
690 haddr->hci_family = AF_BLUETOOTH;
691 haddr->hci_dev = hdev->id;
692 haddr->hci_channel= 0;
694 release_sock(sk);
695 return 0;
698 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
699 struct sk_buff *skb)
701 __u32 mask = hci_pi(sk)->cmsg_mask;
703 if (mask & HCI_CMSG_DIR) {
704 int incoming = bt_cb(skb)->incoming;
705 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
706 &incoming);
709 if (mask & HCI_CMSG_TSTAMP) {
710 #ifdef CONFIG_COMPAT
711 struct compat_timeval ctv;
712 #endif
713 struct timeval tv;
714 void *data;
715 int len;
717 skb_get_timestamp(skb, &tv);
719 data = &tv;
720 len = sizeof(tv);
721 #ifdef CONFIG_COMPAT
722 if (!COMPAT_USE_64BIT_TIME &&
723 (msg->msg_flags & MSG_CMSG_COMPAT)) {
724 ctv.tv_sec = tv.tv_sec;
725 ctv.tv_usec = tv.tv_usec;
726 data = &ctv;
727 len = sizeof(ctv);
729 #endif
731 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
735 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
736 struct msghdr *msg, size_t len, int flags)
738 int noblock = flags & MSG_DONTWAIT;
739 struct sock *sk = sock->sk;
740 struct sk_buff *skb;
741 int copied, err;
743 BT_DBG("sock %p, sk %p", sock, sk);
745 if (flags & (MSG_OOB))
746 return -EOPNOTSUPP;
748 if (sk->sk_state == BT_CLOSED)
749 return 0;
751 skb = skb_recv_datagram(sk, flags, noblock, &err);
752 if (!skb)
753 return err;
755 copied = skb->len;
756 if (len < copied) {
757 msg->msg_flags |= MSG_TRUNC;
758 copied = len;
761 skb_reset_transport_header(skb);
762 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
764 switch (hci_pi(sk)->channel) {
765 case HCI_CHANNEL_RAW:
766 hci_sock_cmsg(sk, msg, skb);
767 break;
768 case HCI_CHANNEL_CONTROL:
769 case HCI_CHANNEL_MONITOR:
770 sock_recv_timestamp(msg, sk, skb);
771 break;
774 skb_free_datagram(sk, skb);
776 return err ? : copied;
779 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
780 struct msghdr *msg, size_t len)
782 struct sock *sk = sock->sk;
783 struct hci_dev *hdev;
784 struct sk_buff *skb;
785 int err;
787 BT_DBG("sock %p sk %p", sock, sk);
789 if (msg->msg_flags & MSG_OOB)
790 return -EOPNOTSUPP;
792 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
793 return -EINVAL;
795 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
796 return -EINVAL;
798 lock_sock(sk);
800 switch (hci_pi(sk)->channel) {
801 case HCI_CHANNEL_RAW:
802 break;
803 case HCI_CHANNEL_CONTROL:
804 err = mgmt_control(sk, msg, len);
805 goto done;
806 case HCI_CHANNEL_MONITOR:
807 err = -EOPNOTSUPP;
808 goto done;
809 default:
810 err = -EINVAL;
811 goto done;
814 hdev = hci_pi(sk)->hdev;
815 if (!hdev) {
816 err = -EBADFD;
817 goto done;
820 if (!test_bit(HCI_UP, &hdev->flags)) {
821 err = -ENETDOWN;
822 goto done;
825 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
826 if (!skb)
827 goto done;
829 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
830 err = -EFAULT;
831 goto drop;
834 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
835 skb_pull(skb, 1);
836 skb->dev = (void *) hdev;
838 if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
839 u16 opcode = get_unaligned_le16(skb->data);
840 u16 ogf = hci_opcode_ogf(opcode);
841 u16 ocf = hci_opcode_ocf(opcode);
843 if (((ogf > HCI_SFLT_MAX_OGF) ||
844 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
845 &hci_sec_filter.ocf_mask[ogf])) &&
846 !capable(CAP_NET_RAW)) {
847 err = -EPERM;
848 goto drop;
851 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
852 skb_queue_tail(&hdev->raw_q, skb);
853 queue_work(hdev->workqueue, &hdev->tx_work);
854 } else {
855 /* Stand-alone HCI commands must be flaged as
856 * single-command requests.
858 bt_cb(skb)->req.start = true;
860 skb_queue_tail(&hdev->cmd_q, skb);
861 queue_work(hdev->workqueue, &hdev->cmd_work);
863 } else {
864 if (!capable(CAP_NET_RAW)) {
865 err = -EPERM;
866 goto drop;
869 skb_queue_tail(&hdev->raw_q, skb);
870 queue_work(hdev->workqueue, &hdev->tx_work);
873 err = len;
875 done:
876 release_sock(sk);
877 return err;
879 drop:
880 kfree_skb(skb);
881 goto done;
884 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
885 char __user *optval, unsigned int len)
887 struct hci_ufilter uf = { .opcode = 0 };
888 struct sock *sk = sock->sk;
889 int err = 0, opt = 0;
891 BT_DBG("sk %p, opt %d", sk, optname);
893 lock_sock(sk);
895 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
896 err = -EINVAL;
897 goto done;
900 switch (optname) {
901 case HCI_DATA_DIR:
902 if (get_user(opt, (int __user *)optval)) {
903 err = -EFAULT;
904 break;
907 if (opt)
908 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
909 else
910 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
911 break;
913 case HCI_TIME_STAMP:
914 if (get_user(opt, (int __user *)optval)) {
915 err = -EFAULT;
916 break;
919 if (opt)
920 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
921 else
922 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
923 break;
925 case HCI_FILTER:
927 struct hci_filter *f = &hci_pi(sk)->filter;
929 uf.type_mask = f->type_mask;
930 uf.opcode = f->opcode;
931 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
932 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
935 len = min_t(unsigned int, len, sizeof(uf));
936 if (copy_from_user(&uf, optval, len)) {
937 err = -EFAULT;
938 break;
941 if (!capable(CAP_NET_RAW)) {
942 uf.type_mask &= hci_sec_filter.type_mask;
943 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
944 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
948 struct hci_filter *f = &hci_pi(sk)->filter;
950 f->type_mask = uf.type_mask;
951 f->opcode = uf.opcode;
952 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
953 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
955 break;
957 default:
958 err = -ENOPROTOOPT;
959 break;
962 done:
963 release_sock(sk);
964 return err;
967 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
968 char __user *optval, int __user *optlen)
970 struct hci_ufilter uf;
971 struct sock *sk = sock->sk;
972 int len, opt, err = 0;
974 BT_DBG("sk %p, opt %d", sk, optname);
976 if (get_user(len, optlen))
977 return -EFAULT;
979 lock_sock(sk);
981 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
982 err = -EINVAL;
983 goto done;
986 switch (optname) {
987 case HCI_DATA_DIR:
988 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
989 opt = 1;
990 else
991 opt = 0;
993 if (put_user(opt, optval))
994 err = -EFAULT;
995 break;
997 case HCI_TIME_STAMP:
998 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
999 opt = 1;
1000 else
1001 opt = 0;
1003 if (put_user(opt, optval))
1004 err = -EFAULT;
1005 break;
1007 case HCI_FILTER:
1009 struct hci_filter *f = &hci_pi(sk)->filter;
1011 memset(&uf, 0, sizeof(uf));
1012 uf.type_mask = f->type_mask;
1013 uf.opcode = f->opcode;
1014 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1015 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1018 len = min_t(unsigned int, len, sizeof(uf));
1019 if (copy_to_user(optval, &uf, len))
1020 err = -EFAULT;
1021 break;
1023 default:
1024 err = -ENOPROTOOPT;
1025 break;
1028 done:
1029 release_sock(sk);
1030 return err;
1033 static const struct proto_ops hci_sock_ops = {
1034 .family = PF_BLUETOOTH,
1035 .owner = THIS_MODULE,
1036 .release = hci_sock_release,
1037 .bind = hci_sock_bind,
1038 .getname = hci_sock_getname,
1039 .sendmsg = hci_sock_sendmsg,
1040 .recvmsg = hci_sock_recvmsg,
1041 .ioctl = hci_sock_ioctl,
1042 .poll = datagram_poll,
1043 .listen = sock_no_listen,
1044 .shutdown = sock_no_shutdown,
1045 .setsockopt = hci_sock_setsockopt,
1046 .getsockopt = hci_sock_getsockopt,
1047 .connect = sock_no_connect,
1048 .socketpair = sock_no_socketpair,
1049 .accept = sock_no_accept,
1050 .mmap = sock_no_mmap
1053 static struct proto hci_sk_proto = {
1054 .name = "HCI",
1055 .owner = THIS_MODULE,
1056 .obj_size = sizeof(struct hci_pinfo)
1059 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1060 int kern)
1062 struct sock *sk;
1064 BT_DBG("sock %p", sock);
1066 if (sock->type != SOCK_RAW)
1067 return -ESOCKTNOSUPPORT;
1069 sock->ops = &hci_sock_ops;
1071 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1072 if (!sk)
1073 return -ENOMEM;
1075 sock_init_data(sock, sk);
1077 sock_reset_flag(sk, SOCK_ZAPPED);
1079 sk->sk_protocol = protocol;
1081 sock->state = SS_UNCONNECTED;
1082 sk->sk_state = BT_OPEN;
1084 bt_sock_link(&hci_sk_list, sk);
1085 return 0;
1088 static const struct net_proto_family hci_sock_family_ops = {
1089 .family = PF_BLUETOOTH,
1090 .owner = THIS_MODULE,
1091 .create = hci_sock_create,
1094 int __init hci_sock_init(void)
1096 int err;
1098 err = proto_register(&hci_sk_proto, 0);
1099 if (err < 0)
1100 return err;
1102 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1103 if (err < 0) {
1104 BT_ERR("HCI socket registration failed");
1105 goto error;
1108 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1109 if (err < 0) {
1110 BT_ERR("Failed to create HCI proc file");
1111 bt_sock_unregister(BTPROTO_HCI);
1112 goto error;
1115 BT_INFO("HCI socket layer initialized");
1117 return 0;
1119 error:
1120 proto_unregister(&hci_sk_proto);
1121 return err;
1124 void hci_sock_cleanup(void)
1126 bt_procfs_cleanup(&init_net, "hci");
1127 bt_sock_unregister(BTPROTO_HCI);
1128 proto_unregister(&hci_sk_proto);