/* net/bluetooth/hci_sock.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int enable_mgmt;
/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
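/*
 * Note on the table above: type_mask 0x10 is bit 4, i.e. HCI_EVENT_PKT
 * (0x04), so unprivileged sockets only receive event packets by default;
 * event_mask is a 64-bit bitmap of the events they may see, and each
 * ocf_mask row is a bitmap of the OCFs they may send within the
 * corresponding OGF.  The table is enforced in hci_sock_sendmsg() and
 * hci_sock_setsockopt() for sockets without CAP_NET_RAW.
 */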
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
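/*
 * Deliver a frame to every bound HCI socket: a clone of the skb is queued
 * on each socket whose channel matches and whose per-socket filter
 * (packet type, event mask, optional opcode) accepts it.  skip_sk and the
 * socket the frame originated from are skipped.
 */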
87 /* Send frame to RAW socket */
88 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
89 struct sock *skip_sk)
91 struct sock *sk;
92 struct hlist_node *node;
94 BT_DBG("hdev %p len %d", hdev, skb->len);
96 read_lock(&hci_sk_list.lock);
97 sk_for_each(sk, node, &hci_sk_list.head) {
98 struct hci_filter *flt;
99 struct sk_buff *nskb;
101 if (sk == skip_sk)
102 continue;
104 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
105 continue;
107 /* Don't send frame to the socket it came from */
108 if (skb->sk == sk)
109 continue;
111 if (bt_cb(skb)->channel != hci_pi(sk)->channel)
112 continue;
114 if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
115 goto clone;
117 /* Apply filter */
118 flt = &hci_pi(sk)->filter;
120 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
121 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
122 continue;
124 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
125 register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
127 if (!hci_test_bit(evt, &flt->event_mask))
128 continue;
130 if (flt->opcode &&
131 ((evt == HCI_EV_CMD_COMPLETE &&
132 flt->opcode !=
133 get_unaligned((__le16 *)(skb->data + 3))) ||
134 (evt == HCI_EV_CMD_STATUS &&
135 flt->opcode !=
136 get_unaligned((__le16 *)(skb->data + 4)))))
137 continue;
140 clone:
141 nskb = skb_clone(skb, GFP_ATOMIC);
142 if (!nskb)
143 continue;
145 /* Put type byte before the data */
146 if (bt_cb(skb)->channel == HCI_CHANNEL_RAW)
147 memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
149 if (sock_queue_rcv_skb(sk, nskb))
150 kfree_skb(nskb);
152 read_unlock(&hci_sk_list.lock);
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
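/*
 * Blacklist helpers backing the HCIBLOCKADDR/HCIUNBLOCKADDR ioctls below.
 * Passing BDADDR_ANY to HCIUNBLOCKADDR clears the whole list.
 */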
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;
	}

	return NULL;
}
static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	struct bdaddr_list *entry;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	if (bacmp(&bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, &bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return 0;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	struct bdaddr_list *entry;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	if (bacmp(&bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, &bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}
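/*
 * Device-independent ioctls are handled here directly (most of them behind
 * a CAP_NET_ADMIN check); anything else is routed to hci_sock_bound_ioctl()
 * under the socket lock and ultimately to the driver's own ioctl hook.
 */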
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
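/*
 * bind() attaches the socket to a channel and, optionally, a device.
 * HCI_CHANNEL_CONTROL is only accepted when the enable_mgmt module
 * parameter is set; binding to a specific device bumps hdev->promisc,
 * which the core uses to decide whether to copy traffic to sockets.
 */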
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
		return -EINVAL;

	if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) {
		err = -EALREADY;
		goto done;
	}

	if (haddr.hci_dev != HCI_DEV_NONE) {
		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		atomic_inc(&hdev->promisc);
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	hci_pi(sk)->hdev = hdev;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;

	release_sock(sk);
	return 0;
}
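/*
 * Fill in ancillary data for recvmsg(): the direction flag and/or the
 * receive timestamp, depending on which HCI_CMSG_* bits the socket has
 * enabled via setsockopt().
 */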
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (msg->msg_flags & MSG_CMSG_COMPAT) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	hci_sock_cmsg(sk, msg, skb);

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
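/*
 * sendmsg() on the raw channel injects a complete HCI packet: the first
 * byte is the packet type, the rest is the frame.  Commands from senders
 * without CAP_NET_RAW are checked against hci_sec_filter; vendor commands
 * (OGF 0x3f) and raw-mode devices go to raw_q instead of the command
 * queue.  Writes on HCI_CHANNEL_CONTROL are handed to mgmt_control().
 */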
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
				!hci_test_bit(ocf & HCI_FLT_OCF_BITS,
						&hci_sec_filter.ocf_mask[ogf])) &&
				!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			tasklet_schedule(&hdev->tx_task);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		tasklet_schedule(&hdev->tx_task);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
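/*
 * For HCI_FILTER, uf is pre-seeded with the socket's current filter so a
 * short write from userspace leaves the remaining fields unchanged, and
 * sockets without CAP_NET_RAW can only narrow the filter within the
 * limits of hci_sec_filter.
 */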
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt;

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			return -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			return -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			return -EFAULT;
		break;

	default:
		return -ENOPROTOOPT;
		break;
	}

	return 0;
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
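/*
 * Device notifier: forwards HCI_DEV_* events to sockets as stack-internal
 * events and, on HCI_DEV_UNREG, detaches every socket still bound to the
 * disappearing device (clearing its hdev, flagging EPIPE and waking any
 * readers).
 */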
static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct hci_dev *hdev = (struct hci_dev *) ptr;
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %ld", hdev->name, event);

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			local_bh_disable();
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
			local_bh_enable();
		}
		read_unlock(&hci_sk_list.lock);
	}

	return NOTIFY_DONE;
}
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

static struct notifier_block hci_sock_nblock = {
	.notifier_call = hci_sock_dev_event
};
int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	hci_register_notifier(&hci_sock_nblock);

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}
void hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	hci_unregister_notifier(&hci_sock_nblock);

	proto_unregister(&hci_sk_proto);
}
module_param(enable_mgmt, bool, 0644);
MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");