/* net/bluetooth/af_bluetooth.c */

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth address family and sockets. */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/stringify.h>
#include <linux/sched/signal.h>

#include <asm/ioctls.h>

#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>

#include "leds.h"
#include "selftest.h"

/* Bluetooth sockets */
#define BT_MAX_PROTO	8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
        "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
        "sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
        "sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
        "sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
        "sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
        "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
        "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
        "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
        "slock-AF_BLUETOOTH-BTPROTO_L2CAP",
        "slock-AF_BLUETOOTH-BTPROTO_HCI",
        "slock-AF_BLUETOOTH-BTPROTO_SCO",
        "slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
        "slock-AF_BLUETOOTH-BTPROTO_BNEP",
        "slock-AF_BLUETOOTH-BTPROTO_CMTP",
        "slock-AF_BLUETOOTH-BTPROTO_HIDP",
        "slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};
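
/* Re-key a socket's lock classes to the per-protocol names above so that
 * lockdep can tell L2CAP, RFCOMM, etc. socket locks apart.
 */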
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
        BUG_ON(!sk);
        BUG_ON(!sock_allow_reclassification(sk));

        sock_lock_init_class_and_name(sk,
                        bt_slock_key_strings[proto], &bt_slock_key[proto],
                        bt_key_strings[proto], &bt_lock_key[proto]);
}
EXPORT_SYMBOL(bt_sock_reclassify_lock);
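
/* Register the protocol family ops for one BTPROTO_* slot; returns -EEXIST
 * if another module already owns that protocol number.
 */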
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
        int err = 0;

        if (proto < 0 || proto >= BT_MAX_PROTO)
                return -EINVAL;

        write_lock(&bt_proto_lock);

        if (bt_proto[proto])
                err = -EEXIST;
        else
                bt_proto[proto] = ops;

        write_unlock(&bt_proto_lock);

        return err;
}
EXPORT_SYMBOL(bt_sock_register);

void bt_sock_unregister(int proto)
{
        if (proto < 0 || proto >= BT_MAX_PROTO)
                return;

        write_lock(&bt_proto_lock);
        bt_proto[proto] = NULL;
        write_unlock(&bt_proto_lock);
}
EXPORT_SYMBOL(bt_sock_unregister);
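
/* PF_BLUETOOTH socket creation: look up the registered protocol (loading the
 * "bt-proto-<n>" module on demand) and hand off to its create() callback.
 * Only the initial network namespace is supported.
 */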
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
                          int kern)
{
        int err;

        if (net != &init_net)
                return -EAFNOSUPPORT;

        if (proto < 0 || proto >= BT_MAX_PROTO)
                return -EINVAL;

        if (!bt_proto[proto])
                request_module("bt-proto-%d", proto);

        err = -EPROTONOSUPPORT;

        read_lock(&bt_proto_lock);

        if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
                err = bt_proto[proto]->create(net, sock, proto, kern);
                if (!err)
                        bt_sock_reclassify_lock(sock->sk, proto);
                module_put(bt_proto[proto]->owner);
        }

        read_unlock(&bt_proto_lock);

        return err;
}
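
/* Add/remove a socket on a protocol's global socket list, which backs the
 * /proc/net seq_file output below.
 */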
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
        write_lock(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);

void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
        write_lock(&l->lock);
        sk_del_node_init(sk);
        write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);
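
/* Queue a child socket on its parent's accept queue. @bh tells us whether
 * the caller runs in bottom-half context and must use the bh socket lock.
 */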
void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
{
        BT_DBG("parent %p, sk %p", parent, sk);

        sock_hold(sk);

        if (bh)
                bh_lock_sock_nested(sk);
        else
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
        bt_sk(sk)->parent = parent;

        if (bh)
                bh_unlock_sock(sk);
        else
                release_sock(sk);

        sk_acceptq_added(parent);
}
EXPORT_SYMBOL(bt_accept_enqueue);

/* Calling function must hold the sk lock.
 * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list.
 */
void bt_accept_unlink(struct sock *sk)
{
        BT_DBG("sk %p state %d", sk, sk->sk_state);

        list_del_init(&bt_sk(sk)->accept_q);
        sk_acceptq_removed(bt_sk(sk)->parent);
        bt_sk(sk)->parent = NULL;
        sock_put(sk);
}
EXPORT_SYMBOL(bt_accept_unlink);
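
/* Remove and return the first child that is connected, or any child when no
 * @newsock is given or BT_SK_DEFER_SETUP is set on the parent; the child is
 * grafted onto @newsock when one is supplied. Returns NULL if nothing is
 * ready.
 */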
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct bt_sock *s, *n;
        struct sock *sk;

        BT_DBG("parent %p", parent);

restart:
        list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *)s;

                /* Prevent early freeing of sk due to unlink and sock_kill */
                sock_hold(sk);
                lock_sock(sk);

                /* Check sk has not already been unlinked via
                 * bt_accept_unlink() due to serialisation caused by sk locking
                 */
                if (!bt_sk(sk)->parent) {
                        BT_DBG("sk %p, already unlinked", sk);
                        release_sock(sk);
                        sock_put(sk);

                        /* Restart the loop as sk is no longer in the list
                         * and also avoid a potential infinite loop because
                         * list_for_each_entry_safe() is not thread safe.
                         */
                        goto restart;
                }

                /* sk is safely in the parent list so reduce reference count */
                sock_put(sk);

                /* FIXME: Is this check still needed */
                if (sk->sk_state == BT_CLOSED) {
                        bt_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == BT_CONNECTED || !newsock ||
                    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
                        bt_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }

        return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);
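
/* Shared datagram-style recvmsg helper: pull one skb off the receive queue,
 * copy it to the caller (setting MSG_TRUNC if it does not fit) and fill in
 * timestamps, the peer name and control messages where callbacks exist.
 */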
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                    int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        size_t copied;
        size_t skblen;
        int err;

        BT_DBG("sock %p sk %p len %zu", sock, sk, len);

        if (flags & MSG_OOB)
                return -EOPNOTSUPP;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;

                return err;
        }

        skblen = skb->len;
        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err == 0) {
                sock_recv_ts_and_drops(msg, sk, skb);

                if (msg->msg_name && bt_sk(sk)->skb_msg_name)
                        bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
                                                &msg->msg_namelen);

                if (bt_sk(sk)->skb_put_cmsg)
                        bt_sk(sk)->skb_put_cmsg(skb, msg, sk);
        }

        skb_free_datagram(sk, skb);

        if (flags & MSG_TRUNC)
                copied = skblen;

        return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);
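
/* Wait for data on the receive queue, an error/shutdown, a signal or the
 * timeout to expire. Called with the socket locked; the lock is dropped
 * around schedule_timeout() and re-taken before returning.
 */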
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!skb_queue_empty(&sk->sk_receive_queue))
                        break;

                if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
                        break;

                if (signal_pending(current) || !timeo)
                        break;

                sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
                sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        }

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
        return timeo;
}
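
/* Stream-style recvmsg helper: keep copying from queued skbs (respecting
 * SO_RCVLOWAT and MSG_PEEK) until the request, the low-water mark or an
 * error/shutdown condition is hit, partially consuming skbs as needed.
 */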
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                           size_t size, int flags)
{
        struct sock *sk = sock->sk;
        int err = 0;
        size_t target, copied = 0;
        long timeo;

        if (flags & MSG_OOB)
                return -EOPNOTSUPP;

        BT_DBG("sk %p size %zu", sk, size);

        lock_sock(sk);

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        do {
                struct sk_buff *skb;
                int chunk;

                skb = skb_dequeue(&sk->sk_receive_queue);
                if (!skb) {
                        if (copied >= target)
                                break;

                        err = sock_error(sk);
                        if (err)
                                break;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;

                        err = -EAGAIN;
                        if (!timeo)
                                break;

                        timeo = bt_sock_data_wait(sk, timeo);

                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
                                goto out;
                        }
                        continue;
                }

                chunk = min_t(unsigned int, skb->len, size);
                if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        if (!copied)
                                copied = -EFAULT;
                        break;
                }
                copied += chunk;
                size -= chunk;

                sock_recv_ts_and_drops(msg, sk, skb);

                if (!(flags & MSG_PEEK)) {
                        int skb_len = skb_headlen(skb);

                        if (chunk <= skb_len) {
                                __skb_pull(skb, chunk);
                        } else {
                                struct sk_buff *frag;

                                __skb_pull(skb, skb_len);
                                chunk -= skb_len;

                                skb_walk_frags(skb, frag) {
                                        if (chunk <= frag->len) {
                                                /* Pulling partial data */
                                                skb->len -= chunk;
                                                skb->data_len -= chunk;
                                                __skb_pull(frag, chunk);
                                                break;
                                        } else if (frag->len) {
                                                /* Pulling all frag data */
                                                chunk -= frag->len;
                                                skb->len -= frag->len;
                                                skb->data_len -= frag->len;
                                                __skb_pull(frag, frag->len);
                                        }
                                }
                        }

                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                break;
                        }
                        kfree_skb(skb);

                } else {
                        /* put message back and return */
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        break;
                }
        } while (size);

out:
        release_sock(sk);
        return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);
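
/* Poll helper for listening sockets: report EPOLLIN as soon as at least one
 * child on the accept queue is connected (or is in BT_CONNECT2 with
 * defer-setup enabled on the parent).
 */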
static inline __poll_t bt_accept_poll(struct sock *parent)
{
        struct bt_sock *s, *n;
        struct sock *sk;

        list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *)s;
                if (sk->sk_state == BT_CONNECTED ||
                    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
                     sk->sk_state == BT_CONNECT2))
                        return EPOLLIN | EPOLLRDNORM;
        }

        return 0;
}
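
/* Generic poll for Bluetooth sockets, combining accept-queue, receive-queue,
 * shutdown and writability state into an epoll mask.
 */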
__poll_t bt_sock_poll(struct file *file, struct socket *sock,
                      poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        poll_wait(file, sk_sleep(sk), wait);

        if (sk->sk_state == BT_LISTEN)
                return bt_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;

        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;

        if (sk->sk_state == BT_CLOSED)
                mask |= EPOLLHUP;

        if (sk->sk_state == BT_CONNECT ||
            sk->sk_state == BT_CONNECT2 ||
            sk->sk_state == BT_CONFIG)
                return mask;

        if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
                mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        return mask;
}
EXPORT_SYMBOL(bt_sock_poll);
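
/* Common ioctls: TIOCOUTQ reports free space in the send buffer, TIOCINQ the
 * length of the skb at the head of the receive queue.
 */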
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        long amount;
        int err;

        BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

        switch (cmd) {
        case TIOCOUTQ:
                if (sk->sk_state == BT_LISTEN)
                        return -EINVAL;

                amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
                if (amount < 0)
                        amount = 0;
                err = put_user(amount, (int __user *) arg);
                break;

        case TIOCINQ:
                if (sk->sk_state == BT_LISTEN)
                        return -EINVAL;

                lock_sock(sk);
                skb = skb_peek(&sk->sk_receive_queue);
                amount = skb ? skb->len : 0;
                release_sock(sk);
                err = put_user(amount, (int __user *) arg);
                break;

        default:
                err = -ENOIOCTLCMD;
                break;
        }

        return err;
}
EXPORT_SYMBOL(bt_sock_ioctl);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("sk %p", sk);

        add_wait_queue(sk_sleep(sk), &wait);
        set_current_state(TASK_INTERRUPTIBLE);
        while (sk->sk_state != state) {
                if (!timeo) {
                        err = -EINPROGRESS;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
                set_current_state(TASK_INTERRUPTIBLE);

                err = sock_error(sk);
                if (err)
                        break;
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
        return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo;
        int err = 0;

        BT_DBG("sk %p", sk);

        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

        add_wait_queue(sk_sleep(sk), &wait);
        set_current_state(TASK_INTERRUPTIBLE);
        while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
                set_current_state(TASK_INTERRUPTIBLE);

                err = sock_error(sk);
                if (err)
                        break;
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);

        return err;
}
EXPORT_SYMBOL(bt_sock_wait_ready);
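
/* /proc/net/<name> support: a seq_file walking a protocol's bt_sock_list,
 * with an optional per-protocol custom_seq_show() hook for extra columns.
 */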
#ifdef CONFIG_PROC_FS
static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(seq->private->l->lock)
{
        struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

        read_lock(&l->lock);
        return seq_hlist_start_head(&l->head, *pos);
}

static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

        return seq_hlist_next(v, &l->head, pos);
}

static void bt_seq_stop(struct seq_file *seq, void *v)
        __releases(seq->private->l->lock)
{
        struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

        read_unlock(&l->lock);
}

static int bt_seq_show(struct seq_file *seq, void *v)
{
        struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

        if (v == SEQ_START_TOKEN) {
                seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Parent");

                if (l->custom_seq_show) {
                        seq_putc(seq, ' ');
                        l->custom_seq_show(seq, v);
                }

                seq_putc(seq, '\n');
        } else {
                struct sock *sk = sk_entry(v);
                struct bt_sock *bt = bt_sk(sk);

                seq_printf(seq,
                           "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
                           sk,
                           refcount_read(&sk->sk_refcnt),
                           sk_rmem_alloc_get(sk),
                           sk_wmem_alloc_get(sk),
                           from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
                           sock_i_ino(sk),
                           bt->parent ? sock_i_ino(bt->parent) : 0LU);

                if (l->custom_seq_show) {
                        seq_putc(seq, ' ');
                        l->custom_seq_show(seq, v);
                }

                seq_putc(seq, '\n');
        }
        return 0;
}

static const struct seq_operations bt_seq_ops = {
        .start = bt_seq_start,
        .next  = bt_seq_next,
        .stop  = bt_seq_stop,
        .show  = bt_seq_show,
};

int bt_procfs_init(struct net *net, const char *name,
                   struct bt_sock_list *sk_list,
                   int (*seq_show)(struct seq_file *, void *))
{
        sk_list->custom_seq_show = seq_show;

        if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list))
                return -ENOMEM;
        return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
        remove_proc_entry(name, net->proc_net);
}
#else
int bt_procfs_init(struct net *net, const char *name,
                   struct bt_sock_list *sk_list,
                   int (*seq_show)(struct seq_file *, void *))
{
        return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
}
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);

static const struct net_proto_family bt_sock_family_ops = {
        .owner  = THIS_MODULE,
        .family = PF_BLUETOOTH,
        .create = bt_sock_create,
};

struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);

#define VERSION __stringify(BT_SUBSYS_VERSION) "." \
                __stringify(BT_SUBSYS_REVISION)
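
/* Module init: run the optional selftests, set up LED triggers, debugfs and
 * sysfs, register the PF_BLUETOOTH family and bring up the HCI, L2CAP, SCO
 * and mgmt layers, unwinding in reverse order on failure.
 */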
static int __init bt_init(void)
{
        int err;

        sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

        BT_INFO("Core ver %s", VERSION);

        err = bt_selftest();
        if (err < 0)
                return err;

        bt_debugfs = debugfs_create_dir("bluetooth", NULL);

        bt_leds_init();

        err = bt_sysfs_init();
        if (err < 0)
                return err;

        err = sock_register(&bt_sock_family_ops);
        if (err)
                goto cleanup_sysfs;

        BT_INFO("HCI device and connection manager initialized");

        err = hci_sock_init();
        if (err)
                goto unregister_socket;

        err = l2cap_init();
        if (err)
                goto cleanup_socket;

        err = sco_init();
        if (err)
                goto cleanup_cap;

        err = mgmt_init();
        if (err)
                goto cleanup_sco;

        return 0;

cleanup_sco:
        sco_exit();
cleanup_cap:
        l2cap_exit();
cleanup_socket:
        hci_sock_cleanup();
unregister_socket:
        sock_unregister(PF_BLUETOOTH);
cleanup_sysfs:
        bt_sysfs_cleanup();
        return err;
}

static void __exit bt_exit(void)
{
        mgmt_exit();

        sco_exit();

        l2cap_exit();

        hci_sock_cleanup();

        sock_unregister(PF_BLUETOOTH);

        bt_sysfs_cleanup();

        bt_leds_cleanup();

        debugfs_remove_recursive(bt_debugfs);
}

subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);