/*
 * net/bluetooth/hci_sock.c -- Bluetooth HCI socket layer
 * (blob 1d65c5be7c823a282bc74a4d9691db53eb37b8d3)
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
36 /* ----- HCI socket interface ----- */
38 /* Socket info */
39 #define hci_pi(sk) ((struct hci_pinfo *) sk)
41 struct hci_pinfo {
42 struct bt_sock bt;
43 struct hci_dev *hdev;
44 struct hci_filter filter;
45 __u32 cmsg_mask;
46 unsigned short channel;
49 static inline int hci_test_bit(int nr, void *addr)
51 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
54 /* Security filter */
55 #define HCI_SFLT_MAX_OGF 5
57 struct hci_sec_filter {
58 __u32 type_mask;
59 __u32 event_mask[2];
60 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
63 static const struct hci_sec_filter hci_sec_filter = {
64 /* Packet types */
65 0x10,
66 /* Events */
67 { 0x1000d9fe, 0x0000b00c },
68 /* Commands */
70 { 0x0 },
71 /* OGF_LINK_CTL */
72 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
73 /* OGF_LINK_POLICY */
74 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
75 /* OGF_HOST_CTL */
76 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
77 /* OGF_INFO_PARAM */
78 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
79 /* OGF_STATUS_PARAM */
80 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
84 static struct bt_sock_list hci_sk_list = {
85 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
88 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
90 struct hci_filter *flt;
91 int flt_type, flt_event;
93 /* Apply filter */
94 flt = &hci_pi(sk)->filter;
96 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
97 flt_type = 0;
98 else
99 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
101 if (!test_bit(flt_type, &flt->type_mask))
102 return true;
104 /* Extra filter for event packets only */
105 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
106 return false;
108 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
110 if (!hci_test_bit(flt_event, &flt->event_mask))
111 return true;
113 /* Check filter only when opcode is set */
114 if (!flt->opcode)
115 return false;
117 if (flt_event == HCI_EV_CMD_COMPLETE &&
118 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
119 return true;
121 if (flt_event == HCI_EV_CMD_STATUS &&
122 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
123 return true;
125 return false;
128 /* Send frame to RAW socket */
129 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
131 struct sock *sk;
132 struct sk_buff *skb_copy = NULL;
134 BT_DBG("hdev %p len %d", hdev, skb->len);
136 read_lock(&hci_sk_list.lock);
138 sk_for_each(sk, &hci_sk_list.head) {
139 struct sk_buff *nskb;
141 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
142 continue;
144 /* Don't send frame to the socket it came from */
145 if (skb->sk == sk)
146 continue;
148 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
149 if (is_filtered_packet(sk, skb))
150 continue;
151 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
152 if (!bt_cb(skb)->incoming)
153 continue;
154 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
155 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
156 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
157 continue;
158 } else {
159 /* Don't send frame to other channel types */
160 continue;
163 if (!skb_copy) {
164 /* Create a private copy with headroom */
165 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
166 if (!skb_copy)
167 continue;
169 /* Put type byte before the data */
170 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
173 nskb = skb_clone(skb_copy, GFP_ATOMIC);
174 if (!nskb)
175 continue;
177 if (sock_queue_rcv_skb(sk, nskb))
178 kfree_skb(nskb);
181 read_unlock(&hci_sk_list.lock);
183 kfree_skb(skb_copy);
186 /* Send frame to control socket */
187 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
189 struct sock *sk;
191 BT_DBG("len %d", skb->len);
193 read_lock(&hci_sk_list.lock);
195 sk_for_each(sk, &hci_sk_list.head) {
196 struct sk_buff *nskb;
198 /* Skip the original socket */
199 if (sk == skip_sk)
200 continue;
202 if (sk->sk_state != BT_BOUND)
203 continue;
205 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
206 continue;
208 nskb = skb_clone(skb, GFP_ATOMIC);
209 if (!nskb)
210 continue;
212 if (sock_queue_rcv_skb(sk, nskb))
213 kfree_skb(nskb);
216 read_unlock(&hci_sk_list.lock);
219 static void queue_monitor_skb(struct sk_buff *skb)
221 struct sock *sk;
223 BT_DBG("len %d", skb->len);
225 read_lock(&hci_sk_list.lock);
227 sk_for_each(sk, &hci_sk_list.head) {
228 struct sk_buff *nskb;
230 if (sk->sk_state != BT_BOUND)
231 continue;
233 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
234 continue;
236 nskb = skb_clone(skb, GFP_ATOMIC);
237 if (!nskb)
238 continue;
240 if (sock_queue_rcv_skb(sk, nskb))
241 kfree_skb(nskb);
244 read_unlock(&hci_sk_list.lock);
247 /* Send frame to monitor socket */
248 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
250 struct sk_buff *skb_copy = NULL;
251 struct hci_mon_hdr *hdr;
252 __le16 opcode;
254 if (!atomic_read(&monitor_promisc))
255 return;
257 BT_DBG("hdev %p len %d", hdev, skb->len);
259 switch (bt_cb(skb)->pkt_type) {
260 case HCI_COMMAND_PKT:
261 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
262 break;
263 case HCI_EVENT_PKT:
264 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
265 break;
266 case HCI_ACLDATA_PKT:
267 if (bt_cb(skb)->incoming)
268 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
269 else
270 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
271 break;
272 case HCI_SCODATA_PKT:
273 if (bt_cb(skb)->incoming)
274 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
275 else
276 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
277 break;
278 default:
279 return;
282 /* Create a private copy with headroom */
283 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
284 if (!skb_copy)
285 return;
287 /* Put header before the data */
288 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
289 hdr->opcode = opcode;
290 hdr->index = cpu_to_le16(hdev->id);
291 hdr->len = cpu_to_le16(skb->len);
293 queue_monitor_skb(skb_copy);
294 kfree_skb(skb_copy);
297 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
299 struct hci_mon_hdr *hdr;
300 struct hci_mon_new_index *ni;
301 struct sk_buff *skb;
302 __le16 opcode;
304 switch (event) {
305 case HCI_DEV_REG:
306 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
307 if (!skb)
308 return NULL;
310 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
311 ni->type = hdev->dev_type;
312 ni->bus = hdev->bus;
313 bacpy(&ni->bdaddr, &hdev->bdaddr);
314 memcpy(ni->name, hdev->name, 8);
316 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
317 break;
319 case HCI_DEV_UNREG:
320 skb = bt_skb_alloc(0, GFP_ATOMIC);
321 if (!skb)
322 return NULL;
324 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
325 break;
327 default:
328 return NULL;
331 __net_timestamp(skb);
333 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
334 hdr->opcode = opcode;
335 hdr->index = cpu_to_le16(hdev->id);
336 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
338 return skb;
341 static void send_monitor_replay(struct sock *sk)
343 struct hci_dev *hdev;
345 read_lock(&hci_dev_list_lock);
347 list_for_each_entry(hdev, &hci_dev_list, list) {
348 struct sk_buff *skb;
350 skb = create_monitor_event(hdev, HCI_DEV_REG);
351 if (!skb)
352 continue;
354 if (sock_queue_rcv_skb(sk, skb))
355 kfree_skb(skb);
358 read_unlock(&hci_dev_list_lock);
361 /* Generate internal stack event */
362 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
364 struct hci_event_hdr *hdr;
365 struct hci_ev_stack_internal *ev;
366 struct sk_buff *skb;
368 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
369 if (!skb)
370 return;
372 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
373 hdr->evt = HCI_EV_STACK_INTERNAL;
374 hdr->plen = sizeof(*ev) + dlen;
376 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
377 ev->type = type;
378 memcpy(ev->data, data, dlen);
380 bt_cb(skb)->incoming = 1;
381 __net_timestamp(skb);
383 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
384 hci_send_to_sock(hdev, skb);
385 kfree_skb(skb);
388 void hci_sock_dev_event(struct hci_dev *hdev, int event)
390 struct hci_ev_si_device ev;
392 BT_DBG("hdev %s event %d", hdev->name, event);
394 /* Send event to monitor */
395 if (atomic_read(&monitor_promisc)) {
396 struct sk_buff *skb;
398 skb = create_monitor_event(hdev, event);
399 if (skb) {
400 queue_monitor_skb(skb);
401 kfree_skb(skb);
405 /* Send event to sockets */
406 ev.event = event;
407 ev.dev_id = hdev->id;
408 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
410 if (event == HCI_DEV_UNREG) {
411 struct sock *sk;
413 /* Detach sockets from device */
414 read_lock(&hci_sk_list.lock);
415 sk_for_each(sk, &hci_sk_list.head) {
416 bh_lock_sock_nested(sk);
417 if (hci_pi(sk)->hdev == hdev) {
418 hci_pi(sk)->hdev = NULL;
419 sk->sk_err = EPIPE;
420 sk->sk_state = BT_OPEN;
421 sk->sk_state_change(sk);
423 hci_dev_put(hdev);
425 bh_unlock_sock(sk);
427 read_unlock(&hci_sk_list.lock);
431 static int hci_sock_release(struct socket *sock)
433 struct sock *sk = sock->sk;
434 struct hci_dev *hdev;
436 BT_DBG("sock %p sk %p", sock, sk);
438 if (!sk)
439 return 0;
441 hdev = hci_pi(sk)->hdev;
443 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
444 atomic_dec(&monitor_promisc);
446 bt_sock_unlink(&hci_sk_list, sk);
448 if (hdev) {
449 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
450 mgmt_index_added(hdev);
451 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
452 hci_dev_close(hdev->id);
455 atomic_dec(&hdev->promisc);
456 hci_dev_put(hdev);
459 sock_orphan(sk);
461 skb_queue_purge(&sk->sk_receive_queue);
462 skb_queue_purge(&sk->sk_write_queue);
464 sock_put(sk);
465 return 0;
468 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
470 bdaddr_t bdaddr;
471 int err;
473 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
474 return -EFAULT;
476 hci_dev_lock(hdev);
478 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
480 hci_dev_unlock(hdev);
482 return err;
485 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
487 bdaddr_t bdaddr;
488 int err;
490 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
491 return -EFAULT;
493 hci_dev_lock(hdev);
495 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
497 hci_dev_unlock(hdev);
499 return err;
502 /* Ioctls that require bound socket */
503 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
504 unsigned long arg)
506 struct hci_dev *hdev = hci_pi(sk)->hdev;
508 if (!hdev)
509 return -EBADFD;
511 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
512 return -EBUSY;
514 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
515 return -EOPNOTSUPP;
517 if (hdev->dev_type != HCI_BREDR)
518 return -EOPNOTSUPP;
520 switch (cmd) {
521 case HCISETRAW:
522 if (!capable(CAP_NET_ADMIN))
523 return -EPERM;
524 return -EOPNOTSUPP;
526 case HCIGETCONNINFO:
527 return hci_get_conn_info(hdev, (void __user *) arg);
529 case HCIGETAUTHINFO:
530 return hci_get_auth_info(hdev, (void __user *) arg);
532 case HCIBLOCKADDR:
533 if (!capable(CAP_NET_ADMIN))
534 return -EPERM;
535 return hci_sock_blacklist_add(hdev, (void __user *) arg);
537 case HCIUNBLOCKADDR:
538 if (!capable(CAP_NET_ADMIN))
539 return -EPERM;
540 return hci_sock_blacklist_del(hdev, (void __user *) arg);
543 return -ENOIOCTLCMD;
546 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
547 unsigned long arg)
549 void __user *argp = (void __user *) arg;
550 struct sock *sk = sock->sk;
551 int err;
553 BT_DBG("cmd %x arg %lx", cmd, arg);
555 lock_sock(sk);
557 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
558 err = -EBADFD;
559 goto done;
562 release_sock(sk);
564 switch (cmd) {
565 case HCIGETDEVLIST:
566 return hci_get_dev_list(argp);
568 case HCIGETDEVINFO:
569 return hci_get_dev_info(argp);
571 case HCIGETCONNLIST:
572 return hci_get_conn_list(argp);
574 case HCIDEVUP:
575 if (!capable(CAP_NET_ADMIN))
576 return -EPERM;
577 return hci_dev_open(arg);
579 case HCIDEVDOWN:
580 if (!capable(CAP_NET_ADMIN))
581 return -EPERM;
582 return hci_dev_close(arg);
584 case HCIDEVRESET:
585 if (!capable(CAP_NET_ADMIN))
586 return -EPERM;
587 return hci_dev_reset(arg);
589 case HCIDEVRESTAT:
590 if (!capable(CAP_NET_ADMIN))
591 return -EPERM;
592 return hci_dev_reset_stat(arg);
594 case HCISETSCAN:
595 case HCISETAUTH:
596 case HCISETENCRYPT:
597 case HCISETPTYPE:
598 case HCISETLINKPOL:
599 case HCISETLINKMODE:
600 case HCISETACLMTU:
601 case HCISETSCOMTU:
602 if (!capable(CAP_NET_ADMIN))
603 return -EPERM;
604 return hci_dev_cmd(cmd, argp);
606 case HCIINQUIRY:
607 return hci_inquiry(argp);
610 lock_sock(sk);
612 err = hci_sock_bound_ioctl(sk, cmd, arg);
614 done:
615 release_sock(sk);
616 return err;
619 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
620 int addr_len)
622 struct sockaddr_hci haddr;
623 struct sock *sk = sock->sk;
624 struct hci_dev *hdev = NULL;
625 int len, err = 0;
627 BT_DBG("sock %p sk %p", sock, sk);
629 if (!addr)
630 return -EINVAL;
632 memset(&haddr, 0, sizeof(haddr));
633 len = min_t(unsigned int, sizeof(haddr), addr_len);
634 memcpy(&haddr, addr, len);
636 if (haddr.hci_family != AF_BLUETOOTH)
637 return -EINVAL;
639 lock_sock(sk);
641 if (sk->sk_state == BT_BOUND) {
642 err = -EALREADY;
643 goto done;
646 switch (haddr.hci_channel) {
647 case HCI_CHANNEL_RAW:
648 if (hci_pi(sk)->hdev) {
649 err = -EALREADY;
650 goto done;
653 if (haddr.hci_dev != HCI_DEV_NONE) {
654 hdev = hci_dev_get(haddr.hci_dev);
655 if (!hdev) {
656 err = -ENODEV;
657 goto done;
660 atomic_inc(&hdev->promisc);
663 hci_pi(sk)->hdev = hdev;
664 break;
666 case HCI_CHANNEL_USER:
667 if (hci_pi(sk)->hdev) {
668 err = -EALREADY;
669 goto done;
672 if (haddr.hci_dev == HCI_DEV_NONE) {
673 err = -EINVAL;
674 goto done;
677 if (!capable(CAP_NET_ADMIN)) {
678 err = -EPERM;
679 goto done;
682 hdev = hci_dev_get(haddr.hci_dev);
683 if (!hdev) {
684 err = -ENODEV;
685 goto done;
688 if (test_bit(HCI_UP, &hdev->flags) ||
689 test_bit(HCI_INIT, &hdev->flags) ||
690 test_bit(HCI_SETUP, &hdev->dev_flags) ||
691 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
692 err = -EBUSY;
693 hci_dev_put(hdev);
694 goto done;
697 if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
698 err = -EUSERS;
699 hci_dev_put(hdev);
700 goto done;
703 mgmt_index_removed(hdev);
705 err = hci_dev_open(hdev->id);
706 if (err) {
707 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
708 mgmt_index_added(hdev);
709 hci_dev_put(hdev);
710 goto done;
713 atomic_inc(&hdev->promisc);
715 hci_pi(sk)->hdev = hdev;
716 break;
718 case HCI_CHANNEL_CONTROL:
719 if (haddr.hci_dev != HCI_DEV_NONE) {
720 err = -EINVAL;
721 goto done;
724 if (!capable(CAP_NET_ADMIN)) {
725 err = -EPERM;
726 goto done;
729 break;
731 case HCI_CHANNEL_MONITOR:
732 if (haddr.hci_dev != HCI_DEV_NONE) {
733 err = -EINVAL;
734 goto done;
737 if (!capable(CAP_NET_RAW)) {
738 err = -EPERM;
739 goto done;
742 send_monitor_replay(sk);
744 atomic_inc(&monitor_promisc);
745 break;
747 default:
748 err = -EINVAL;
749 goto done;
753 hci_pi(sk)->channel = haddr.hci_channel;
754 sk->sk_state = BT_BOUND;
756 done:
757 release_sock(sk);
758 return err;
761 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
762 int *addr_len, int peer)
764 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
765 struct sock *sk = sock->sk;
766 struct hci_dev *hdev;
767 int err = 0;
769 BT_DBG("sock %p sk %p", sock, sk);
771 if (peer)
772 return -EOPNOTSUPP;
774 lock_sock(sk);
776 hdev = hci_pi(sk)->hdev;
777 if (!hdev) {
778 err = -EBADFD;
779 goto done;
782 *addr_len = sizeof(*haddr);
783 haddr->hci_family = AF_BLUETOOTH;
784 haddr->hci_dev = hdev->id;
785 haddr->hci_channel= hci_pi(sk)->channel;
787 done:
788 release_sock(sk);
789 return err;
792 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
793 struct sk_buff *skb)
795 __u32 mask = hci_pi(sk)->cmsg_mask;
797 if (mask & HCI_CMSG_DIR) {
798 int incoming = bt_cb(skb)->incoming;
799 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
800 &incoming);
803 if (mask & HCI_CMSG_TSTAMP) {
804 #ifdef CONFIG_COMPAT
805 struct compat_timeval ctv;
806 #endif
807 struct timeval tv;
808 void *data;
809 int len;
811 skb_get_timestamp(skb, &tv);
813 data = &tv;
814 len = sizeof(tv);
815 #ifdef CONFIG_COMPAT
816 if (!COMPAT_USE_64BIT_TIME &&
817 (msg->msg_flags & MSG_CMSG_COMPAT)) {
818 ctv.tv_sec = tv.tv_sec;
819 ctv.tv_usec = tv.tv_usec;
820 data = &ctv;
821 len = sizeof(ctv);
823 #endif
825 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
829 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
830 struct msghdr *msg, size_t len, int flags)
832 int noblock = flags & MSG_DONTWAIT;
833 struct sock *sk = sock->sk;
834 struct sk_buff *skb;
835 int copied, err;
837 BT_DBG("sock %p, sk %p", sock, sk);
839 if (flags & (MSG_OOB))
840 return -EOPNOTSUPP;
842 if (sk->sk_state == BT_CLOSED)
843 return 0;
845 skb = skb_recv_datagram(sk, flags, noblock, &err);
846 if (!skb)
847 return err;
849 copied = skb->len;
850 if (len < copied) {
851 msg->msg_flags |= MSG_TRUNC;
852 copied = len;
855 skb_reset_transport_header(skb);
856 err = skb_copy_datagram_msg(skb, 0, msg, copied);
858 switch (hci_pi(sk)->channel) {
859 case HCI_CHANNEL_RAW:
860 hci_sock_cmsg(sk, msg, skb);
861 break;
862 case HCI_CHANNEL_USER:
863 case HCI_CHANNEL_CONTROL:
864 case HCI_CHANNEL_MONITOR:
865 sock_recv_timestamp(msg, sk, skb);
866 break;
869 skb_free_datagram(sk, skb);
871 return err ? : copied;
874 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
875 struct msghdr *msg, size_t len)
877 struct sock *sk = sock->sk;
878 struct hci_dev *hdev;
879 struct sk_buff *skb;
880 int err;
882 BT_DBG("sock %p sk %p", sock, sk);
884 if (msg->msg_flags & MSG_OOB)
885 return -EOPNOTSUPP;
887 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
888 return -EINVAL;
890 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
891 return -EINVAL;
893 lock_sock(sk);
895 switch (hci_pi(sk)->channel) {
896 case HCI_CHANNEL_RAW:
897 case HCI_CHANNEL_USER:
898 break;
899 case HCI_CHANNEL_CONTROL:
900 err = mgmt_control(sk, msg, len);
901 goto done;
902 case HCI_CHANNEL_MONITOR:
903 err = -EOPNOTSUPP;
904 goto done;
905 default:
906 err = -EINVAL;
907 goto done;
910 hdev = hci_pi(sk)->hdev;
911 if (!hdev) {
912 err = -EBADFD;
913 goto done;
916 if (!test_bit(HCI_UP, &hdev->flags)) {
917 err = -ENETDOWN;
918 goto done;
921 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
922 if (!skb)
923 goto done;
925 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
926 err = -EFAULT;
927 goto drop;
930 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
931 skb_pull(skb, 1);
933 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
934 /* No permission check is needed for user channel
935 * since that gets enforced when binding the socket.
937 * However check that the packet type is valid.
939 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
940 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
941 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
942 err = -EINVAL;
943 goto drop;
946 skb_queue_tail(&hdev->raw_q, skb);
947 queue_work(hdev->workqueue, &hdev->tx_work);
948 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
949 u16 opcode = get_unaligned_le16(skb->data);
950 u16 ogf = hci_opcode_ogf(opcode);
951 u16 ocf = hci_opcode_ocf(opcode);
953 if (((ogf > HCI_SFLT_MAX_OGF) ||
954 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
955 &hci_sec_filter.ocf_mask[ogf])) &&
956 !capable(CAP_NET_RAW)) {
957 err = -EPERM;
958 goto drop;
961 if (ogf == 0x3f) {
962 skb_queue_tail(&hdev->raw_q, skb);
963 queue_work(hdev->workqueue, &hdev->tx_work);
964 } else {
965 /* Stand-alone HCI commands must be flagged as
966 * single-command requests.
968 bt_cb(skb)->req.start = true;
970 skb_queue_tail(&hdev->cmd_q, skb);
971 queue_work(hdev->workqueue, &hdev->cmd_work);
973 } else {
974 if (!capable(CAP_NET_RAW)) {
975 err = -EPERM;
976 goto drop;
979 skb_queue_tail(&hdev->raw_q, skb);
980 queue_work(hdev->workqueue, &hdev->tx_work);
983 err = len;
985 done:
986 release_sock(sk);
987 return err;
989 drop:
990 kfree_skb(skb);
991 goto done;
994 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
995 char __user *optval, unsigned int len)
997 struct hci_ufilter uf = { .opcode = 0 };
998 struct sock *sk = sock->sk;
999 int err = 0, opt = 0;
1001 BT_DBG("sk %p, opt %d", sk, optname);
1003 lock_sock(sk);
1005 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1006 err = -EBADFD;
1007 goto done;
1010 switch (optname) {
1011 case HCI_DATA_DIR:
1012 if (get_user(opt, (int __user *)optval)) {
1013 err = -EFAULT;
1014 break;
1017 if (opt)
1018 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1019 else
1020 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1021 break;
1023 case HCI_TIME_STAMP:
1024 if (get_user(opt, (int __user *)optval)) {
1025 err = -EFAULT;
1026 break;
1029 if (opt)
1030 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1031 else
1032 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1033 break;
1035 case HCI_FILTER:
1037 struct hci_filter *f = &hci_pi(sk)->filter;
1039 uf.type_mask = f->type_mask;
1040 uf.opcode = f->opcode;
1041 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1042 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1045 len = min_t(unsigned int, len, sizeof(uf));
1046 if (copy_from_user(&uf, optval, len)) {
1047 err = -EFAULT;
1048 break;
1051 if (!capable(CAP_NET_RAW)) {
1052 uf.type_mask &= hci_sec_filter.type_mask;
1053 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1054 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1058 struct hci_filter *f = &hci_pi(sk)->filter;
1060 f->type_mask = uf.type_mask;
1061 f->opcode = uf.opcode;
1062 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1063 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1065 break;
1067 default:
1068 err = -ENOPROTOOPT;
1069 break;
1072 done:
1073 release_sock(sk);
1074 return err;
1077 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1078 char __user *optval, int __user *optlen)
1080 struct hci_ufilter uf;
1081 struct sock *sk = sock->sk;
1082 int len, opt, err = 0;
1084 BT_DBG("sk %p, opt %d", sk, optname);
1086 if (get_user(len, optlen))
1087 return -EFAULT;
1089 lock_sock(sk);
1091 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1092 err = -EBADFD;
1093 goto done;
1096 switch (optname) {
1097 case HCI_DATA_DIR:
1098 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1099 opt = 1;
1100 else
1101 opt = 0;
1103 if (put_user(opt, optval))
1104 err = -EFAULT;
1105 break;
1107 case HCI_TIME_STAMP:
1108 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1109 opt = 1;
1110 else
1111 opt = 0;
1113 if (put_user(opt, optval))
1114 err = -EFAULT;
1115 break;
1117 case HCI_FILTER:
1119 struct hci_filter *f = &hci_pi(sk)->filter;
1121 memset(&uf, 0, sizeof(uf));
1122 uf.type_mask = f->type_mask;
1123 uf.opcode = f->opcode;
1124 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1125 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1128 len = min_t(unsigned int, len, sizeof(uf));
1129 if (copy_to_user(optval, &uf, len))
1130 err = -EFAULT;
1131 break;
1133 default:
1134 err = -ENOPROTOOPT;
1135 break;
1138 done:
1139 release_sock(sk);
1140 return err;
1143 static const struct proto_ops hci_sock_ops = {
1144 .family = PF_BLUETOOTH,
1145 .owner = THIS_MODULE,
1146 .release = hci_sock_release,
1147 .bind = hci_sock_bind,
1148 .getname = hci_sock_getname,
1149 .sendmsg = hci_sock_sendmsg,
1150 .recvmsg = hci_sock_recvmsg,
1151 .ioctl = hci_sock_ioctl,
1152 .poll = datagram_poll,
1153 .listen = sock_no_listen,
1154 .shutdown = sock_no_shutdown,
1155 .setsockopt = hci_sock_setsockopt,
1156 .getsockopt = hci_sock_getsockopt,
1157 .connect = sock_no_connect,
1158 .socketpair = sock_no_socketpair,
1159 .accept = sock_no_accept,
1160 .mmap = sock_no_mmap
1163 static struct proto hci_sk_proto = {
1164 .name = "HCI",
1165 .owner = THIS_MODULE,
1166 .obj_size = sizeof(struct hci_pinfo)
1169 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1170 int kern)
1172 struct sock *sk;
1174 BT_DBG("sock %p", sock);
1176 if (sock->type != SOCK_RAW)
1177 return -ESOCKTNOSUPPORT;
1179 sock->ops = &hci_sock_ops;
1181 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1182 if (!sk)
1183 return -ENOMEM;
1185 sock_init_data(sock, sk);
1187 sock_reset_flag(sk, SOCK_ZAPPED);
1189 sk->sk_protocol = protocol;
1191 sock->state = SS_UNCONNECTED;
1192 sk->sk_state = BT_OPEN;
1194 bt_sock_link(&hci_sk_list, sk);
1195 return 0;
1198 static const struct net_proto_family hci_sock_family_ops = {
1199 .family = PF_BLUETOOTH,
1200 .owner = THIS_MODULE,
1201 .create = hci_sock_create,
1204 int __init hci_sock_init(void)
1206 int err;
1208 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1210 err = proto_register(&hci_sk_proto, 0);
1211 if (err < 0)
1212 return err;
1214 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1215 if (err < 0) {
1216 BT_ERR("HCI socket registration failed");
1217 goto error;
1220 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1221 if (err < 0) {
1222 BT_ERR("Failed to create HCI proc file");
1223 bt_sock_unregister(BTPROTO_HCI);
1224 goto error;
1227 BT_INFO("HCI socket layer initialized");
1229 return 0;
1231 error:
1232 proto_unregister(&hci_sk_proto);
1233 return err;
1236 void hci_sock_cleanup(void)
1238 bt_procfs_cleanup(&init_net, "hci");
1239 bt_sock_unregister(BTPROTO_HCI);
1240 proto_unregister(&hci_sk_proto);