net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
55 #define AUTO_OFF_TIMEOUT 2000	/* milliseconds */
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
61 static DEFINE_RWLOCK(hci_task_lock);
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
71 /* HCI protocols */
72 #define HCI_MAX_PROTO 2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
78 /* ---- HCI notifications ---- */
80 int hci_register_notifier(struct notifier_block *nb)
82 return atomic_notifier_chain_register(&hci_notifier, nb);
85 int hci_unregister_notifier(struct notifier_block *nb)
87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
90 static void hci_notify(struct hci_dev *hdev, int event)
92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
95 /* ---- HCI requests ---- */
97 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
101 /* If this is the init phase, check if the completed command matches
102 * the last init command, and if not just return.
103 */
104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
105 return;
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 unsigned long opt, __u32 timeout)
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
132 BT_DBG("%s start", hdev->name);
134 hdev->req_status = HCI_REQ_PEND;
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
139 req(hdev, opt);
140 schedule_timeout(timeout);
142 remove_wait_queue(&hdev->req_wait_q, &wait);
144 if (signal_pending(current))
145 return -EINTR;
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_to_errno(hdev->req_result);
150 break;
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
156 default:
157 err = -ETIMEDOUT;
158 break;
161 hdev->req_status = hdev->req_result = 0;
163 BT_DBG("%s end: err %d", hdev->name, err);
165 return err;
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
171 int ret;
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
181 return ret;
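/* Typical use, as in the ioctl helpers further down in this file: run a
 * request callback and sleep until hci_req_complete() sees the matching
 * command, or the timeout fires, e.g.
 *
 *	err = hci_request(hdev, hci_auth_req, dr.dev_opt,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */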
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186 BT_DBG("%s %ld", hdev->name, opt);
188 /* Reset device */
189 set_bit(HCI_RESET, &hdev->flags);
190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
193 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
195 struct hci_cp_delete_stored_link_key cp;
196 struct sk_buff *skb;
197 __le16 param;
198 __u8 flt_type;
200 BT_DBG("%s %ld", hdev->name, opt);
202 /* Driver initialization */
204 /* Special commands */
205 while ((skb = skb_dequeue(&hdev->driver_init))) {
206 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
207 skb->dev = (void *) hdev;
209 skb_queue_tail(&hdev->cmd_q, skb);
210 tasklet_schedule(&hdev->cmd_task);
212 skb_queue_purge(&hdev->driver_init);
214 /* Mandatory initialization */
216 /* Reset */
217 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 set_bit(HCI_RESET, &hdev->flags);
219 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
222 /* Read Local Supported Features */
223 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
225 /* Read Local Version */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
229 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
231 #if 0
232 /* Host buffer size */
234 struct hci_cp_host_buffer_size cp;
235 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
236 cp.sco_mtu = HCI_MAX_SCO_SIZE;
237 cp.acl_max_pkt = cpu_to_le16(0xffff);
238 cp.sco_max_pkt = cpu_to_le16(0xffff);
239 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
241 #endif
243 /* Read BD Address */
244 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
246 /* Read Class of Device */
247 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
249 /* Read Local Name */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
252 /* Read Voice Setting */
253 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
255 /* Optional initialization */
257 /* Clear Event Filters */
258 flt_type = HCI_FLT_CLEAR_ALL;
259 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
261 /* Connection accept timeout: 0x7d00 * 0.625 ms slots = 20 secs */
262 param = cpu_to_le16(0x7d00);
263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
265 bacpy(&cp.bdaddr, BDADDR_ANY);
266 cp.delete_all = 1;
267 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
270 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
272 BT_DBG("%s", hdev->name);
274 /* Read LE buffer size */
275 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
278 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
280 __u8 scan = opt;
282 BT_DBG("%s %x", hdev->name, scan);
284 /* Inquiry and Page scans */
285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
288 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
290 __u8 auth = opt;
292 BT_DBG("%s %x", hdev->name, auth);
294 /* Authentication */
295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
298 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
300 __u8 encrypt = opt;
302 BT_DBG("%s %x", hdev->name, encrypt);
304 /* Encryption */
305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
308 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
310 __le16 policy = cpu_to_le16(opt);
312 BT_DBG("%s %x", hdev->name, policy);
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
318 /* Get HCI device by index.
319 * Device is held on return. */
320 struct hci_dev *hci_dev_get(int index)
322 struct hci_dev *hdev = NULL;
323 struct list_head *p;
325 BT_DBG("%d", index);
327 if (index < 0)
328 return NULL;
330 read_lock(&hci_dev_list_lock);
331 list_for_each(p, &hci_dev_list) {
332 struct hci_dev *d = list_entry(p, struct hci_dev, list);
333 if (d->id == index) {
334 hdev = hci_dev_hold(d);
335 break;
338 read_unlock(&hci_dev_list_lock);
339 return hdev;
342 /* ---- Inquiry support ---- */
343 static void inquiry_cache_flush(struct hci_dev *hdev)
345 struct inquiry_cache *cache = &hdev->inq_cache;
346 struct inquiry_entry *next = cache->list, *e;
348 BT_DBG("cache %p", cache);
350 cache->list = NULL;
351 while ((e = next)) {
352 next = e->next;
353 kfree(e);
357 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
359 struct inquiry_cache *cache = &hdev->inq_cache;
360 struct inquiry_entry *e;
362 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
364 for (e = cache->list; e; e = e->next)
365 if (!bacmp(&e->data.bdaddr, bdaddr))
366 break;
367 return e;
370 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
372 struct inquiry_cache *cache = &hdev->inq_cache;
373 struct inquiry_entry *ie;
375 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
377 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
378 if (!ie) {
379 /* Entry not in the cache. Add new one. */
380 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
381 if (!ie)
382 return;
384 ie->next = cache->list;
385 cache->list = ie;
388 memcpy(&ie->data, data, sizeof(*data));
389 ie->timestamp = jiffies;
390 cache->timestamp = jiffies;
393 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
395 struct inquiry_cache *cache = &hdev->inq_cache;
396 struct inquiry_info *info = (struct inquiry_info *) buf;
397 struct inquiry_entry *e;
398 int copied = 0;
400 for (e = cache->list; e && copied < num; e = e->next, copied++) {
401 struct inquiry_data *data = &e->data;
402 bacpy(&info->bdaddr, &data->bdaddr);
403 info->pscan_rep_mode = data->pscan_rep_mode;
404 info->pscan_period_mode = data->pscan_period_mode;
405 info->pscan_mode = data->pscan_mode;
406 memcpy(info->dev_class, data->dev_class, 3);
407 info->clock_offset = data->clock_offset;
408 info++;
411 BT_DBG("cache %p, copied %d", cache, copied);
412 return copied;
415 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
417 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
418 struct hci_cp_inquiry cp;
420 BT_DBG("%s", hdev->name);
422 if (test_bit(HCI_INQUIRY, &hdev->flags))
423 return;
425 /* Start Inquiry */
426 memcpy(&cp.lap, &ir->lap, 3);
427 cp.length = ir->length;
428 cp.num_rsp = ir->num_rsp;
429 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
432 int hci_inquiry(void __user *arg)
434 __u8 __user *ptr = arg;
435 struct hci_inquiry_req ir;
436 struct hci_dev *hdev;
437 int err = 0, do_inquiry = 0, max_rsp;
438 long timeo;
439 __u8 *buf;
441 if (copy_from_user(&ir, ptr, sizeof(ir)))
442 return -EFAULT;
444 hdev = hci_dev_get(ir.dev_id);
445 if (!hdev)
446 return -ENODEV;
448 hci_dev_lock_bh(hdev);
449 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
450 inquiry_cache_empty(hdev) ||
451 ir.flags & IREQ_CACHE_FLUSH) {
452 inquiry_cache_flush(hdev);
453 do_inquiry = 1;
455 hci_dev_unlock_bh(hdev);
457 timeo = ir.length * msecs_to_jiffies(2000); /* ir.length is in 1.28 s units */
459 if (do_inquiry) {
460 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
461 if (err < 0)
462 goto done;
465 /* for an unlimited number of responses, use a buffer with 255 entries */
466 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
468 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
469 * copy it to user space.
470 */
471 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
472 if (!buf) {
473 err = -ENOMEM;
474 goto done;
477 hci_dev_lock_bh(hdev);
478 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
479 hci_dev_unlock_bh(hdev);
481 BT_DBG("num_rsp %d", ir.num_rsp);
483 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
484 ptr += sizeof(ir);
485 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
486 ir.num_rsp))
487 err = -EFAULT;
488 } else
489 err = -EFAULT;
491 kfree(buf);
493 done:
494 hci_dev_put(hdev);
495 return err;
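/* Userspace sketch (not part of this file): how the HCIINQUIRY ioctl
 * served above is typically driven from a raw HCI socket. It assumes the
 * BlueZ libbluetooth headers and linking with -lbluetooth for ba2str();
 * the kernel writes the updated request header back, followed by
 * ir.num_rsp inquiry_info records. Kept in an #if 0 block, matching the
 * convention used elsewhere in this file. */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	uint8_t buf[sizeof(struct hci_inquiry_req) + 255 * sizeof(inquiry_info)];
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
	inquiry_info *info = (inquiry_info *) (buf + sizeof(*ir));
	int i, fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0)
		return 1;

	memset(buf, 0, sizeof(buf));
	ir->dev_id  = 0;			/* hci0 */
	ir->flags   = IREQ_CACHE_FLUSH;
	ir->length  = 8;			/* inquiry for 8 * 1.28 s */
	ir->num_rsp = 255;			/* unlimited, see above */
	memcpy(ir->lap, "\x33\x8b\x9e", 3);	/* general inquiry (GIAC) */

	if (ioctl(fd, HCIINQUIRY, buf) < 0)
		return 1;

	for (i = 0; i < ir->num_rsp; i++) {
		char addr[18];

		ba2str(&info[i].bdaddr, addr);
		printf("%s\n", addr);
	}

	close(fd);
	return 0;
}
#endif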
498 /* ---- HCI ioctl helpers ---- */
500 int hci_dev_open(__u16 dev)
502 struct hci_dev *hdev;
503 int ret = 0;
505 hdev = hci_dev_get(dev);
506 if (!hdev)
507 return -ENODEV;
509 BT_DBG("%s %p", hdev->name, hdev);
511 hci_req_lock(hdev);
513 if (test_bit(HCI_UNREGISTER, &hdev->flags)) {
514 ret = -ENODEV;
515 goto done;
518 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
519 ret = -ERFKILL;
520 goto done;
523 if (test_bit(HCI_UP, &hdev->flags)) {
524 ret = -EALREADY;
525 goto done;
528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
529 set_bit(HCI_RAW, &hdev->flags);
531 /* Treat all non-BR/EDR controllers as raw devices for now */
532 if (hdev->dev_type != HCI_BREDR)
533 set_bit(HCI_RAW, &hdev->flags);
535 if (hdev->open(hdev)) {
536 ret = -EIO;
537 goto done;
540 if (!test_bit(HCI_RAW, &hdev->flags)) {
541 atomic_set(&hdev->cmd_cnt, 1);
542 set_bit(HCI_INIT, &hdev->flags);
543 hdev->init_last_cmd = 0;
545 ret = __hci_request(hdev, hci_init_req, 0,
546 msecs_to_jiffies(HCI_INIT_TIMEOUT));
548 if (lmp_host_le_capable(hdev))
549 ret = __hci_request(hdev, hci_le_init_req, 0,
550 msecs_to_jiffies(HCI_INIT_TIMEOUT));
552 clear_bit(HCI_INIT, &hdev->flags);
555 if (!ret) {
556 hci_dev_hold(hdev);
557 set_bit(HCI_UP, &hdev->flags);
558 hci_notify(hdev, HCI_DEV_UP);
559 if (!test_bit(HCI_SETUP, &hdev->flags))
560 mgmt_powered(hdev->id, 1);
561 } else {
562 /* Init failed, cleanup */
563 tasklet_kill(&hdev->rx_task);
564 tasklet_kill(&hdev->tx_task);
565 tasklet_kill(&hdev->cmd_task);
567 skb_queue_purge(&hdev->cmd_q);
568 skb_queue_purge(&hdev->rx_q);
570 if (hdev->flush)
571 hdev->flush(hdev);
573 if (hdev->sent_cmd) {
574 kfree_skb(hdev->sent_cmd);
575 hdev->sent_cmd = NULL;
578 hdev->close(hdev);
579 hdev->flags = 0;
582 done:
583 hci_req_unlock(hdev);
584 hci_dev_put(hdev);
585 return ret;
588 static int hci_dev_do_close(struct hci_dev *hdev)
590 BT_DBG("%s %p", hdev->name, hdev);
592 hci_req_cancel(hdev, ENODEV);
593 hci_req_lock(hdev);
595 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
596 del_timer_sync(&hdev->cmd_timer);
597 hci_req_unlock(hdev);
598 return 0;
601 /* Kill RX and TX tasks */
602 tasklet_kill(&hdev->rx_task);
603 tasklet_kill(&hdev->tx_task);
605 hci_dev_lock_bh(hdev);
606 inquiry_cache_flush(hdev);
607 hci_conn_hash_flush(hdev);
608 hci_dev_unlock_bh(hdev);
610 hci_notify(hdev, HCI_DEV_DOWN);
612 if (hdev->flush)
613 hdev->flush(hdev);
615 /* Reset device */
616 skb_queue_purge(&hdev->cmd_q);
617 atomic_set(&hdev->cmd_cnt, 1);
618 if (!test_bit(HCI_RAW, &hdev->flags)) {
619 set_bit(HCI_INIT, &hdev->flags);
620 __hci_request(hdev, hci_reset_req, 0,
621 msecs_to_jiffies(250));
622 clear_bit(HCI_INIT, &hdev->flags);
625 /* Kill cmd task */
626 tasklet_kill(&hdev->cmd_task);
628 /* Drop queues */
629 skb_queue_purge(&hdev->rx_q);
630 skb_queue_purge(&hdev->cmd_q);
631 skb_queue_purge(&hdev->raw_q);
633 /* Drop last sent command */
634 if (hdev->sent_cmd) {
635 del_timer_sync(&hdev->cmd_timer);
636 kfree_skb(hdev->sent_cmd);
637 hdev->sent_cmd = NULL;
640 /* After this point our queues are empty
641 * and no tasks are scheduled. */
642 hdev->close(hdev);
644 mgmt_powered(hdev->id, 0);
646 /* Clear flags */
647 hdev->flags = 0;
649 hci_req_unlock(hdev);
651 hci_dev_put(hdev);
652 return 0;
655 int hci_dev_close(__u16 dev)
657 struct hci_dev *hdev;
658 int err;
660 hdev = hci_dev_get(dev);
661 if (!hdev)
662 return -ENODEV;
663 err = hci_dev_do_close(hdev);
664 hci_dev_put(hdev);
665 return err;
668 int hci_dev_reset(__u16 dev)
670 struct hci_dev *hdev;
671 int ret = 0;
673 hdev = hci_dev_get(dev);
674 if (!hdev)
675 return -ENODEV;
677 hci_req_lock(hdev);
678 tasklet_disable(&hdev->tx_task);
680 if (!test_bit(HCI_UP, &hdev->flags))
681 goto done;
683 /* Drop queues */
684 skb_queue_purge(&hdev->rx_q);
685 skb_queue_purge(&hdev->cmd_q);
687 hci_dev_lock_bh(hdev);
688 inquiry_cache_flush(hdev);
689 hci_conn_hash_flush(hdev);
690 hci_dev_unlock_bh(hdev);
692 if (hdev->flush)
693 hdev->flush(hdev);
695 atomic_set(&hdev->cmd_cnt, 1);
696 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
698 if (!test_bit(HCI_RAW, &hdev->flags))
699 ret = __hci_request(hdev, hci_reset_req, 0,
700 msecs_to_jiffies(HCI_INIT_TIMEOUT));
702 done:
703 tasklet_enable(&hdev->tx_task);
704 hci_req_unlock(hdev);
705 hci_dev_put(hdev);
706 return ret;
709 int hci_dev_reset_stat(__u16 dev)
711 struct hci_dev *hdev;
712 int ret = 0;
714 hdev = hci_dev_get(dev);
715 if (!hdev)
716 return -ENODEV;
718 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
720 hci_dev_put(hdev);
722 return ret;
725 int hci_dev_cmd(unsigned int cmd, void __user *arg)
727 struct hci_dev *hdev;
728 struct hci_dev_req dr;
729 int err = 0;
731 if (copy_from_user(&dr, arg, sizeof(dr)))
732 return -EFAULT;
734 hdev = hci_dev_get(dr.dev_id);
735 if (!hdev)
736 return -ENODEV;
738 switch (cmd) {
739 case HCISETAUTH:
740 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
741 msecs_to_jiffies(HCI_INIT_TIMEOUT));
742 break;
744 case HCISETENCRYPT:
745 if (!lmp_encrypt_capable(hdev)) {
746 err = -EOPNOTSUPP;
747 break;
750 if (!test_bit(HCI_AUTH, &hdev->flags)) {
751 /* Auth must be enabled first */
752 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
753 msecs_to_jiffies(HCI_INIT_TIMEOUT));
754 if (err)
755 break;
758 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
759 msecs_to_jiffies(HCI_INIT_TIMEOUT));
760 break;
762 case HCISETSCAN:
763 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
764 msecs_to_jiffies(HCI_INIT_TIMEOUT));
765 break;
767 case HCISETLINKPOL:
768 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
769 msecs_to_jiffies(HCI_INIT_TIMEOUT));
770 break;
772 case HCISETLINKMODE:
773 hdev->link_mode = ((__u16) dr.dev_opt) &
774 (HCI_LM_MASTER | HCI_LM_ACCEPT);
775 break;
777 case HCISETPTYPE:
778 hdev->pkt_type = (__u16) dr.dev_opt;
779 break;
781 case HCISETACLMTU:
782 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
783 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
784 break;
786 case HCISETSCOMTU:
787 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
788 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
789 break;
791 default:
792 err = -EINVAL;
793 break;
796 hci_dev_put(hdev);
797 return err;
800 int hci_get_dev_list(void __user *arg)
802 struct hci_dev_list_req *dl;
803 struct hci_dev_req *dr;
804 struct list_head *p;
805 int n = 0, size, err;
806 __u16 dev_num;
808 if (get_user(dev_num, (__u16 __user *) arg))
809 return -EFAULT;
811 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
812 return -EINVAL;
814 size = sizeof(*dl) + dev_num * sizeof(*dr);
816 dl = kzalloc(size, GFP_KERNEL);
817 if (!dl)
818 return -ENOMEM;
820 dr = dl->dev_req;
822 read_lock_bh(&hci_dev_list_lock);
823 list_for_each(p, &hci_dev_list) {
824 struct hci_dev *hdev;
826 hdev = list_entry(p, struct hci_dev, list);
828 hci_del_off_timer(hdev);
830 if (!test_bit(HCI_MGMT, &hdev->flags))
831 set_bit(HCI_PAIRABLE, &hdev->flags);
833 (dr + n)->dev_id = hdev->id;
834 (dr + n)->dev_opt = hdev->flags;
836 if (++n >= dev_num)
837 break;
839 read_unlock_bh(&hci_dev_list_lock);
841 dl->dev_num = n;
842 size = sizeof(*dl) + n * sizeof(*dr);
844 err = copy_to_user(arg, dl, size);
845 kfree(dl);
847 return err ? -EFAULT : 0;
850 int hci_get_dev_info(void __user *arg)
852 struct hci_dev *hdev;
853 struct hci_dev_info di;
854 int err = 0;
856 if (copy_from_user(&di, arg, sizeof(di)))
857 return -EFAULT;
859 hdev = hci_dev_get(di.dev_id);
860 if (!hdev)
861 return -ENODEV;
863 hci_del_off_timer(hdev);
865 if (!test_bit(HCI_MGMT, &hdev->flags))
866 set_bit(HCI_PAIRABLE, &hdev->flags);
868 strcpy(di.name, hdev->name);
869 di.bdaddr = hdev->bdaddr;
870 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
871 di.flags = hdev->flags;
872 di.pkt_type = hdev->pkt_type;
873 di.acl_mtu = hdev->acl_mtu;
874 di.acl_pkts = hdev->acl_pkts;
875 di.sco_mtu = hdev->sco_mtu;
876 di.sco_pkts = hdev->sco_pkts;
877 di.link_policy = hdev->link_policy;
878 di.link_mode = hdev->link_mode;
880 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
881 memcpy(&di.features, &hdev->features, sizeof(di.features));
883 if (copy_to_user(arg, &di, sizeof(di)))
884 err = -EFAULT;
886 hci_dev_put(hdev);
888 return err;
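/* Userspace sketch (not part of this file; assumes the BlueZ headers):
 * fetching the structure filled in above via the HCIGETDEVINFO ioctl on
 * a raw HCI socket. */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct hci_dev_info di;
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0)
		return 1;

	memset(&di, 0, sizeof(di));
	di.dev_id = 0;				/* query hci0 */

	if (ioctl(fd, HCIGETDEVINFO, &di) == 0)
		printf("%s: acl_mtu %u acl_pkts %u sco_mtu %u\n",
			di.name, di.acl_mtu, di.acl_pkts, di.sco_mtu);

	close(fd);
	return 0;
}
#endif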
891 /* ---- Interface to HCI drivers ---- */
893 static int hci_rfkill_set_block(void *data, bool blocked)
895 struct hci_dev *hdev = data;
897 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
899 if (!blocked)
900 return 0;
902 hci_dev_do_close(hdev);
904 return 0;
907 static const struct rfkill_ops hci_rfkill_ops = {
908 .set_block = hci_rfkill_set_block,
911 /* Alloc HCI device */
912 struct hci_dev *hci_alloc_dev(void)
914 struct hci_dev *hdev;
916 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
917 if (!hdev)
918 return NULL;
920 skb_queue_head_init(&hdev->driver_init);
922 return hdev;
924 EXPORT_SYMBOL(hci_alloc_dev);
926 /* Free HCI device */
927 void hci_free_dev(struct hci_dev *hdev)
929 skb_queue_purge(&hdev->driver_init);
931 /* will free via device release */
932 put_device(&hdev->dev);
934 EXPORT_SYMBOL(hci_free_dev);
936 static void hci_power_on(struct work_struct *work)
938 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
940 BT_DBG("%s", hdev->name);
942 if (hci_dev_open(hdev->id) < 0)
943 return;
945 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
946 mod_timer(&hdev->off_timer,
947 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
949 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
950 mgmt_index_added(hdev->id);
953 static void hci_power_off(struct work_struct *work)
955 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
957 BT_DBG("%s", hdev->name);
959 hci_dev_close(hdev->id);
962 static void hci_auto_off(unsigned long data)
964 struct hci_dev *hdev = (struct hci_dev *) data;
966 BT_DBG("%s", hdev->name);
968 clear_bit(HCI_AUTO_OFF, &hdev->flags);
970 queue_work(hdev->workqueue, &hdev->power_off);
973 void hci_del_off_timer(struct hci_dev *hdev)
975 BT_DBG("%s", hdev->name);
977 clear_bit(HCI_AUTO_OFF, &hdev->flags);
978 del_timer(&hdev->off_timer);
981 int hci_uuids_clear(struct hci_dev *hdev)
983 struct list_head *p, *n;
985 list_for_each_safe(p, n, &hdev->uuids) {
986 struct bt_uuid *uuid;
988 uuid = list_entry(p, struct bt_uuid, list);
990 list_del(p);
991 kfree(uuid);
994 return 0;
997 int hci_link_keys_clear(struct hci_dev *hdev)
999 struct list_head *p, *n;
1001 list_for_each_safe(p, n, &hdev->link_keys) {
1002 struct link_key *key;
1004 key = list_entry(p, struct link_key, list);
1006 list_del(p);
1007 kfree(key);
1010 return 0;
1013 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1015 struct list_head *p;
1017 list_for_each(p, &hdev->link_keys) {
1018 struct link_key *k;
1020 k = list_entry(p, struct link_key, list);
1022 if (bacmp(bdaddr, &k->bdaddr) == 0)
1023 return k;
1026 return NULL;
1029 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1030 u8 key_type, u8 old_key_type)
1032 /* Legacy key */
1033 if (key_type < 0x03)
1034 return 1;
1036 /* Debug keys are insecure so don't store them persistently */
1037 if (key_type == HCI_LK_DEBUG_COMBINATION)
1038 return 0;
1040 /* Changed combination key and there's no previous one */
1041 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1042 return 0;
1044 /* Security mode 3 case */
1045 if (!conn)
1046 return 1;
1048 /* Neither local nor remote side requested no-bonding */
1049 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1050 return 1;
1052 /* Local side had dedicated bonding as requirement */
1053 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1054 return 1;
1056 /* Remote side had dedicated bonding as requirement */
1057 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1058 return 1;
1060 /* If none of the above criteria match, then don't store the key
1061 * persistently */
1062 return 0;
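/* Worked example of the rules above: a legacy PIN pairing (key type
 * below 0x03) is always stored, a debug combination key (0x03) never
 * is, and for Secure Simple Pairing keys the outcome depends on the
 * bonding requirements both sides exchanged, so a no-bonding pairing
 * leaves no persistent key behind. */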
1065 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1067 struct link_key *k;
1069 list_for_each_entry(k, &hdev->link_keys, list) {
1070 struct key_master_id *id;
1072 if (k->type != HCI_LK_SMP_LTK)
1073 continue;
1075 if (k->dlen != sizeof(*id))
1076 continue;
1078 id = (void *) &k->data;
1079 if (id->ediv == ediv &&
1080 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1081 return k;
1084 return NULL;
1086 EXPORT_SYMBOL(hci_find_ltk);
1088 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1089 bdaddr_t *bdaddr, u8 type)
1091 struct link_key *k;
1093 list_for_each_entry(k, &hdev->link_keys, list)
1094 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1095 return k;
1097 return NULL;
1099 EXPORT_SYMBOL(hci_find_link_key_type);
1101 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1102 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1104 struct link_key *key, *old_key;
1105 u8 old_key_type, persistent;
1107 old_key = hci_find_link_key(hdev, bdaddr);
1108 if (old_key) {
1109 old_key_type = old_key->type;
1110 key = old_key;
1111 } else {
1112 old_key_type = conn ? conn->key_type : 0xff;
1113 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1114 if (!key)
1115 return -ENOMEM;
1116 list_add(&key->list, &hdev->link_keys);
1119 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1121 /* Some buggy controller combinations generate a changed
1122 * combination key for legacy pairing even when there's no
1123 * previous key */
1124 if (type == HCI_LK_CHANGED_COMBINATION &&
1125 (!conn || conn->remote_auth == 0xff) &&
1126 old_key_type == 0xff) {
1127 type = HCI_LK_COMBINATION;
1128 if (conn)
1129 conn->key_type = type;
1132 bacpy(&key->bdaddr, bdaddr);
1133 memcpy(key->val, val, 16);
1134 key->pin_len = pin_len;
1136 if (type == HCI_LK_CHANGED_COMBINATION)
1137 key->type = old_key_type;
1138 else
1139 key->type = type;
1141 if (!new_key)
1142 return 0;
1144 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1146 mgmt_new_key(hdev->id, key, persistent);
1148 if (!persistent) {
1149 list_del(&key->list);
1150 kfree(key);
1153 return 0;
1156 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1157 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1159 struct link_key *key, *old_key;
1160 struct key_master_id *id;
1161 u8 old_key_type;
1163 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1165 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1166 if (old_key) {
1167 key = old_key;
1168 old_key_type = old_key->type;
1169 } else {
1170 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1171 if (!key)
1172 return -ENOMEM;
1173 list_add(&key->list, &hdev->link_keys);
1174 old_key_type = 0xff;
1177 key->dlen = sizeof(*id);
1179 bacpy(&key->bdaddr, bdaddr);
1180 memcpy(key->val, ltk, sizeof(key->val));
1181 key->type = HCI_LK_SMP_LTK;
1182 key->pin_len = key_size;
1184 id = (void *) &key->data;
1185 id->ediv = ediv;
1186 memcpy(id->rand, rand, sizeof(id->rand));
1188 if (new_key)
1189 mgmt_new_key(hdev->id, key, old_key_type);
1191 return 0;
1194 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1196 struct link_key *key;
1198 key = hci_find_link_key(hdev, bdaddr);
1199 if (!key)
1200 return -ENOENT;
1202 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1204 list_del(&key->list);
1205 kfree(key);
1207 return 0;
1210 /* HCI command timer function */
1211 static void hci_cmd_timer(unsigned long arg)
1213 struct hci_dev *hdev = (void *) arg;
1215 BT_ERR("%s command tx timeout", hdev->name);
1216 atomic_set(&hdev->cmd_cnt, 1);
1217 tasklet_schedule(&hdev->cmd_task);
1220 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1221 bdaddr_t *bdaddr)
1223 struct oob_data *data;
1225 list_for_each_entry(data, &hdev->remote_oob_data, list)
1226 if (bacmp(bdaddr, &data->bdaddr) == 0)
1227 return data;
1229 return NULL;
1232 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1234 struct oob_data *data;
1236 data = hci_find_remote_oob_data(hdev, bdaddr);
1237 if (!data)
1238 return -ENOENT;
1240 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1242 list_del(&data->list);
1243 kfree(data);
1245 return 0;
1248 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1250 struct oob_data *data, *n;
1252 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1253 list_del(&data->list);
1254 kfree(data);
1257 return 0;
1260 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1261 u8 *randomizer)
1263 struct oob_data *data;
1265 data = hci_find_remote_oob_data(hdev, bdaddr);
1267 if (!data) {
1268 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1269 if (!data)
1270 return -ENOMEM;
1272 bacpy(&data->bdaddr, bdaddr);
1273 list_add(&data->list, &hdev->remote_oob_data);
1276 memcpy(data->hash, hash, sizeof(data->hash));
1277 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1279 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1281 return 0;
1284 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1285 bdaddr_t *bdaddr)
1287 struct list_head *p;
1289 list_for_each(p, &hdev->blacklist) {
1290 struct bdaddr_list *b;
1292 b = list_entry(p, struct bdaddr_list, list);
1294 if (bacmp(bdaddr, &b->bdaddr) == 0)
1295 return b;
1298 return NULL;
1301 int hci_blacklist_clear(struct hci_dev *hdev)
1303 struct list_head *p, *n;
1305 list_for_each_safe(p, n, &hdev->blacklist) {
1306 struct bdaddr_list *b;
1308 b = list_entry(p, struct bdaddr_list, list);
1310 list_del(p);
1311 kfree(b);
1314 return 0;
1317 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1319 struct bdaddr_list *entry;
1321 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1322 return -EBADF;
1324 if (hci_blacklist_lookup(hdev, bdaddr))
1325 return -EEXIST;
1327 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1328 if (!entry)
1329 return -ENOMEM;
1331 bacpy(&entry->bdaddr, bdaddr);
1333 list_add(&entry->list, &hdev->blacklist);
1335 return mgmt_device_blocked(hdev->id, bdaddr);
1338 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1340 struct bdaddr_list *entry;
1342 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1343 return hci_blacklist_clear(hdev);
1346 entry = hci_blacklist_lookup(hdev, bdaddr);
1347 if (!entry) {
1348 return -ENOENT;
1351 list_del(&entry->list);
1352 kfree(entry);
1354 return mgmt_device_unblocked(hdev->id, bdaddr);
1357 static void hci_clear_adv_cache(unsigned long arg)
1359 struct hci_dev *hdev = (void *) arg;
1361 hci_dev_lock(hdev);
1363 hci_adv_entries_clear(hdev);
1365 hci_dev_unlock(hdev);
1368 int hci_adv_entries_clear(struct hci_dev *hdev)
1370 struct adv_entry *entry, *tmp;
1372 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1373 list_del(&entry->list);
1374 kfree(entry);
1377 BT_DBG("%s adv cache cleared", hdev->name);
1379 return 0;
1382 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1384 struct adv_entry *entry;
1386 list_for_each_entry(entry, &hdev->adv_entries, list)
1387 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1388 return entry;
1390 return NULL;
1393 static inline int is_connectable_adv(u8 evt_type)
1395 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1396 return 1;
1398 return 0;
1401 int hci_add_adv_entry(struct hci_dev *hdev,
1402 struct hci_ev_le_advertising_info *ev)
1404 struct adv_entry *entry;
1406 if (!is_connectable_adv(ev->evt_type))
1407 return -EINVAL;
1409 /* Only new entries should be added to adv_entries. So, if
1410 * bdaddr was found, don't add it. */
1411 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1412 return 0;
1414 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1415 if (!entry)
1416 return -ENOMEM;
1418 bacpy(&entry->bdaddr, &ev->bdaddr);
1419 entry->bdaddr_type = ev->bdaddr_type;
1421 list_add(&entry->list, &hdev->adv_entries);
1423 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1424 batostr(&entry->bdaddr), entry->bdaddr_type);
1426 return 0;
1429 /* Register HCI device */
1430 int hci_register_dev(struct hci_dev *hdev)
1432 struct list_head *head = &hci_dev_list, *p;
1433 int i, id = 0;
1435 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1436 hdev->bus, hdev->owner);
1438 if (!hdev->open || !hdev->close || !hdev->destruct)
1439 return -EINVAL;
1441 write_lock_bh(&hci_dev_list_lock);
1443 /* Find first available device id */
1444 list_for_each(p, &hci_dev_list) {
1445 if (list_entry(p, struct hci_dev, list)->id != id)
1446 break;
1447 head = p; id++;
1450 sprintf(hdev->name, "hci%d", id);
1451 hdev->id = id;
1452 list_add(&hdev->list, head);
1454 atomic_set(&hdev->refcnt, 1);
1455 spin_lock_init(&hdev->lock);
1457 hdev->flags = 0;
1458 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1459 hdev->esco_type = (ESCO_HV1);
1460 hdev->link_mode = (HCI_LM_ACCEPT);
1461 hdev->io_capability = 0x03; /* No Input No Output */
1463 hdev->idle_timeout = 0;
1464 hdev->sniff_max_interval = 800;
1465 hdev->sniff_min_interval = 80;
1467 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1468 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1469 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1471 skb_queue_head_init(&hdev->rx_q);
1472 skb_queue_head_init(&hdev->cmd_q);
1473 skb_queue_head_init(&hdev->raw_q);
1475 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1477 for (i = 0; i < NUM_REASSEMBLY; i++)
1478 hdev->reassembly[i] = NULL;
1480 init_waitqueue_head(&hdev->req_wait_q);
1481 mutex_init(&hdev->req_lock);
1483 inquiry_cache_init(hdev);
1485 hci_conn_hash_init(hdev);
1487 INIT_LIST_HEAD(&hdev->blacklist);
1489 INIT_LIST_HEAD(&hdev->uuids);
1491 INIT_LIST_HEAD(&hdev->link_keys);
1493 INIT_LIST_HEAD(&hdev->remote_oob_data);
1495 INIT_LIST_HEAD(&hdev->adv_entries);
1496 setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1497 (unsigned long) hdev);
1499 INIT_WORK(&hdev->power_on, hci_power_on);
1500 INIT_WORK(&hdev->power_off, hci_power_off);
1501 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1503 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1505 atomic_set(&hdev->promisc, 0);
1507 write_unlock_bh(&hci_dev_list_lock);
1509 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1510 if (!hdev->workqueue)
1511 goto nomem;
1513 hci_register_sysfs(hdev);
1515 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1516 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1517 if (hdev->rfkill) {
1518 if (rfkill_register(hdev->rfkill) < 0) {
1519 rfkill_destroy(hdev->rfkill);
1520 hdev->rfkill = NULL;
1524 set_bit(HCI_AUTO_OFF, &hdev->flags);
1525 set_bit(HCI_SETUP, &hdev->flags);
1526 queue_work(hdev->workqueue, &hdev->power_on);
1528 hci_notify(hdev, HCI_DEV_REG);
1530 return id;
1532 nomem:
1533 write_lock_bh(&hci_dev_list_lock);
1534 list_del(&hdev->list);
1535 write_unlock_bh(&hci_dev_list_lock);
1537 return -ENOMEM;
1539 EXPORT_SYMBOL(hci_register_dev);
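/* Driver-side sketch (hypothetical xyz_* callbacks): transport drivers
 * allocate a device, fill in the mandatory ops checked above and
 * register it, e.g.
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus = HCI_USB;
 *	hdev->open = xyz_open;
 *	hdev->close = xyz_close;
 *	hdev->send = xyz_send;
 *	hdev->destruct = xyz_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */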
1541 /* Unregister HCI device */
1542 int hci_unregister_dev(struct hci_dev *hdev)
1544 int i;
1546 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1548 set_bit(HCI_UNREGISTER, &hdev->flags);
1550 write_lock_bh(&hci_dev_list_lock);
1551 list_del(&hdev->list);
1552 write_unlock_bh(&hci_dev_list_lock);
1554 hci_dev_do_close(hdev);
1556 for (i = 0; i < NUM_REASSEMBLY; i++)
1557 kfree_skb(hdev->reassembly[i]);
1559 if (!test_bit(HCI_INIT, &hdev->flags) &&
1560 !test_bit(HCI_SETUP, &hdev->flags))
1561 mgmt_index_removed(hdev->id);
1563 hci_notify(hdev, HCI_DEV_UNREG);
1565 if (hdev->rfkill) {
1566 rfkill_unregister(hdev->rfkill);
1567 rfkill_destroy(hdev->rfkill);
1570 hci_unregister_sysfs(hdev);
1572 hci_del_off_timer(hdev);
1573 del_timer(&hdev->adv_timer);
1575 destroy_workqueue(hdev->workqueue);
1577 hci_dev_lock_bh(hdev);
1578 hci_blacklist_clear(hdev);
1579 hci_uuids_clear(hdev);
1580 hci_link_keys_clear(hdev);
1581 hci_remote_oob_data_clear(hdev);
1582 hci_adv_entries_clear(hdev);
1583 hci_dev_unlock_bh(hdev);
1585 __hci_dev_put(hdev);
1587 return 0;
1589 EXPORT_SYMBOL(hci_unregister_dev);
1591 /* Suspend HCI device */
1592 int hci_suspend_dev(struct hci_dev *hdev)
1594 hci_notify(hdev, HCI_DEV_SUSPEND);
1595 return 0;
1597 EXPORT_SYMBOL(hci_suspend_dev);
1599 /* Resume HCI device */
1600 int hci_resume_dev(struct hci_dev *hdev)
1602 hci_notify(hdev, HCI_DEV_RESUME);
1603 return 0;
1605 EXPORT_SYMBOL(hci_resume_dev);
1607 /* Receive frame from HCI drivers */
1608 int hci_recv_frame(struct sk_buff *skb)
1610 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1611 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1612 && !test_bit(HCI_INIT, &hdev->flags))) {
1613 kfree_skb(skb);
1614 return -ENXIO;
1617 /* Incoming skb */
1618 bt_cb(skb)->incoming = 1;
1620 /* Time stamp */
1621 __net_timestamp(skb);
1623 /* Queue frame for rx task */
1624 skb_queue_tail(&hdev->rx_q, skb);
1625 tasklet_schedule(&hdev->rx_task);
1627 return 0;
1629 EXPORT_SYMBOL(hci_recv_frame);
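/* Callers of hci_recv_frame() must have set skb->dev to the hci_dev and
 * bt_cb(skb)->pkt_type to the packet type; the reassembly helpers below
 * do both before handing over a completed frame. */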
1631 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1632 int count, __u8 index)
1634 int len = 0;
1635 int hlen = 0;
1636 int remain = count;
1637 struct sk_buff *skb;
1638 struct bt_skb_cb *scb;
1640 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1641 index >= NUM_REASSEMBLY)
1642 return -EILSEQ;
1644 skb = hdev->reassembly[index];
1646 if (!skb) {
1647 switch (type) {
1648 case HCI_ACLDATA_PKT:
1649 len = HCI_MAX_FRAME_SIZE;
1650 hlen = HCI_ACL_HDR_SIZE;
1651 break;
1652 case HCI_EVENT_PKT:
1653 len = HCI_MAX_EVENT_SIZE;
1654 hlen = HCI_EVENT_HDR_SIZE;
1655 break;
1656 case HCI_SCODATA_PKT:
1657 len = HCI_MAX_SCO_SIZE;
1658 hlen = HCI_SCO_HDR_SIZE;
1659 break;
1662 skb = bt_skb_alloc(len, GFP_ATOMIC);
1663 if (!skb)
1664 return -ENOMEM;
1666 scb = (void *) skb->cb;
1667 scb->expect = hlen;
1668 scb->pkt_type = type;
1670 skb->dev = (void *) hdev;
1671 hdev->reassembly[index] = skb;
1674 while (count) {
1675 scb = (void *) skb->cb;
1676 len = min(scb->expect, (__u16)count);
1678 memcpy(skb_put(skb, len), data, len);
1680 count -= len;
1681 data += len;
1682 scb->expect -= len;
1683 remain = count;
1685 switch (type) {
1686 case HCI_EVENT_PKT:
1687 if (skb->len == HCI_EVENT_HDR_SIZE) {
1688 struct hci_event_hdr *h = hci_event_hdr(skb);
1689 scb->expect = h->plen;
1691 if (skb_tailroom(skb) < scb->expect) {
1692 kfree_skb(skb);
1693 hdev->reassembly[index] = NULL;
1694 return -ENOMEM;
1697 break;
1699 case HCI_ACLDATA_PKT:
1700 if (skb->len == HCI_ACL_HDR_SIZE) {
1701 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1702 scb->expect = __le16_to_cpu(h->dlen);
1704 if (skb_tailroom(skb) < scb->expect) {
1705 kfree_skb(skb);
1706 hdev->reassembly[index] = NULL;
1707 return -ENOMEM;
1710 break;
1712 case HCI_SCODATA_PKT:
1713 if (skb->len == HCI_SCO_HDR_SIZE) {
1714 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1715 scb->expect = h->dlen;
1717 if (skb_tailroom(skb) < scb->expect) {
1718 kfree_skb(skb);
1719 hdev->reassembly[index] = NULL;
1720 return -ENOMEM;
1723 break;
1726 if (scb->expect == 0) {
1727 /* Complete frame */
1729 bt_cb(skb)->pkt_type = type;
1730 hci_recv_frame(skb);
1732 hdev->reassembly[index] = NULL;
1733 return remain;
1737 return remain;
1740 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1742 int rem = 0;
1744 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1745 return -EILSEQ;
1747 while (count) {
1748 rem = hci_reassembly(hdev, type, data, count, type - 1);
1749 if (rem < 0)
1750 return rem;
1752 data += (count - rem);
1753 count = rem;
1756 return rem;
1758 EXPORT_SYMBOL(hci_recv_fragment);
1760 #define STREAM_REASSEMBLY 0
1762 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1764 int type;
1765 int rem = 0;
1767 while (count) {
1768 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1770 if (!skb) {
1771 struct { char type; } *pkt;
1773 /* Start of the frame */
1774 pkt = data;
1775 type = pkt->type;
1777 data++;
1778 count--;
1779 } else
1780 type = bt_cb(skb)->pkt_type;
1782 rem = hci_reassembly(hdev, type, data, count,
1783 STREAM_REASSEMBLY);
1784 if (rem < 0)
1785 return rem;
1787 data += (count - rem);
1788 count = rem;
1791 return rem;
1793 EXPORT_SYMBOL(hci_recv_stream_fragment);
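/* Driver-side sketch (hypothetical xyz_uart_rx): a byte-stream transport
 * such as a UART simply feeds every received chunk to the stream
 * reassembler and lets it carve out complete HCI packets, e.g.
 *
 *	static void xyz_uart_rx(struct hci_dev *hdev, u8 *buf, int len)
 *	{
 *		hci_recv_stream_fragment(hdev, buf, len);
 *	}
 */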
1795 /* ---- Interface to upper protocols ---- */
1797 /* Register/Unregister protocols.
1798 * hci_task_lock is used to ensure that no tasks are running. */
1799 int hci_register_proto(struct hci_proto *hp)
1801 int err = 0;
1803 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1805 if (hp->id >= HCI_MAX_PROTO)
1806 return -EINVAL;
1808 write_lock_bh(&hci_task_lock);
1810 if (!hci_proto[hp->id])
1811 hci_proto[hp->id] = hp;
1812 else
1813 err = -EEXIST;
1815 write_unlock_bh(&hci_task_lock);
1817 return err;
1819 EXPORT_SYMBOL(hci_register_proto);
1821 int hci_unregister_proto(struct hci_proto *hp)
1823 int err = 0;
1825 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1827 if (hp->id >= HCI_MAX_PROTO)
1828 return -EINVAL;
1830 write_lock_bh(&hci_task_lock);
1832 if (hci_proto[hp->id])
1833 hci_proto[hp->id] = NULL;
1834 else
1835 err = -ENOENT;
1837 write_unlock_bh(&hci_task_lock);
1839 return err;
1841 EXPORT_SYMBOL(hci_unregister_proto);
1843 int hci_register_cb(struct hci_cb *cb)
1845 BT_DBG("%p name %s", cb, cb->name);
1847 write_lock_bh(&hci_cb_list_lock);
1848 list_add(&cb->list, &hci_cb_list);
1849 write_unlock_bh(&hci_cb_list_lock);
1851 return 0;
1853 EXPORT_SYMBOL(hci_register_cb);
1855 int hci_unregister_cb(struct hci_cb *cb)
1857 BT_DBG("%p name %s", cb, cb->name);
1859 write_lock_bh(&hci_cb_list_lock);
1860 list_del(&cb->list);
1861 write_unlock_bh(&hci_cb_list_lock);
1863 return 0;
1865 EXPORT_SYMBOL(hci_unregister_cb);
1867 static int hci_send_frame(struct sk_buff *skb)
1869 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1871 if (!hdev) {
1872 kfree_skb(skb);
1873 return -ENODEV;
1876 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1878 if (atomic_read(&hdev->promisc)) {
1879 /* Time stamp */
1880 __net_timestamp(skb);
1882 hci_send_to_sock(hdev, skb, NULL);
1885 /* Get rid of skb owner, prior to sending to the driver. */
1886 skb_orphan(skb);
1888 return hdev->send(skb);
1891 /* Send HCI command */
1892 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1894 int len = HCI_COMMAND_HDR_SIZE + plen;
1895 struct hci_command_hdr *hdr;
1896 struct sk_buff *skb;
1898 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1900 skb = bt_skb_alloc(len, GFP_ATOMIC);
1901 if (!skb) {
1902 BT_ERR("%s no memory for command", hdev->name);
1903 return -ENOMEM;
1906 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1907 hdr->opcode = cpu_to_le16(opcode);
1908 hdr->plen = plen;
1910 if (plen)
1911 memcpy(skb_put(skb, plen), param, plen);
1913 BT_DBG("skb len %d", skb->len);
1915 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1916 skb->dev = (void *) hdev;
1918 if (test_bit(HCI_INIT, &hdev->flags))
1919 hdev->init_last_cmd = opcode;
1921 skb_queue_tail(&hdev->cmd_q, skb);
1922 tasklet_schedule(&hdev->cmd_task);
1924 return 0;
1927 /* Get data from the previously sent command */
1928 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1930 struct hci_command_hdr *hdr;
1932 if (!hdev->sent_cmd)
1933 return NULL;
1935 hdr = (void *) hdev->sent_cmd->data;
1937 if (hdr->opcode != cpu_to_le16(opcode))
1938 return NULL;
1940 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1942 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1945 /* Send ACL data */
1946 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1948 struct hci_acl_hdr *hdr;
1949 int len = skb->len;
1951 skb_push(skb, HCI_ACL_HDR_SIZE);
1952 skb_reset_transport_header(skb);
1953 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1954 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1955 hdr->dlen = cpu_to_le16(len);
1958 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1960 struct hci_dev *hdev = conn->hdev;
1961 struct sk_buff *list;
1963 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1965 skb->dev = (void *) hdev;
1966 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1967 hci_add_acl_hdr(skb, conn->handle, flags);
1969 list = skb_shinfo(skb)->frag_list;
1970 if (!list) {
1971 /* Non fragmented */
1972 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1974 skb_queue_tail(&conn->data_q, skb);
1975 } else {
1976 /* Fragmented */
1977 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1979 skb_shinfo(skb)->frag_list = NULL;
1981 /* Queue all fragments atomically */
1982 spin_lock_bh(&conn->data_q.lock);
1984 __skb_queue_tail(&conn->data_q, skb);
1986 flags &= ~ACL_START;
1987 flags |= ACL_CONT;
1988 do {
1989 skb = list; list = list->next;
1991 skb->dev = (void *) hdev;
1992 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1993 hci_add_acl_hdr(skb, conn->handle, flags);
1995 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1997 __skb_queue_tail(&conn->data_q, skb);
1998 } while (list);
2000 spin_unlock_bh(&conn->data_q.lock);
2003 tasklet_schedule(&hdev->tx_task);
2005 EXPORT_SYMBOL(hci_send_acl);
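/* Note on the fragmented path above: a frame larger than the controller's
 * ACL MTU arrives here with its pieces chained on frag_list; the head skb
 * keeps the caller's ACL_START flag, each continuation is re-headered
 * with ACL_CONT, and the whole run is queued under the data_q lock so the
 * TX scheduler cannot interleave another frame. */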
2007 /* Send SCO data */
2008 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2010 struct hci_dev *hdev = conn->hdev;
2011 struct hci_sco_hdr hdr;
2013 BT_DBG("%s len %d", hdev->name, skb->len);
2015 hdr.handle = cpu_to_le16(conn->handle);
2016 hdr.dlen = skb->len;
2018 skb_push(skb, HCI_SCO_HDR_SIZE);
2019 skb_reset_transport_header(skb);
2020 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2022 skb->dev = (void *) hdev;
2023 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2025 skb_queue_tail(&conn->data_q, skb);
2026 tasklet_schedule(&hdev->tx_task);
2028 EXPORT_SYMBOL(hci_send_sco);
2030 /* ---- HCI TX task (outgoing data) ---- */
2032 /* HCI Connection scheduler */
2033 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2035 struct hci_conn_hash *h = &hdev->conn_hash;
2036 struct hci_conn *conn = NULL;
2037 int num = 0, min = ~0;
2038 struct list_head *p;
2040 /* We don't have to lock device here. Connections are always
2041 * added and removed with TX task disabled. */
2042 list_for_each(p, &h->list) {
2043 struct hci_conn *c;
2044 c = list_entry(p, struct hci_conn, list);
2046 if (c->type != type || skb_queue_empty(&c->data_q))
2047 continue;
2049 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2050 continue;
2052 num++;
2054 if (c->sent < min) {
2055 min = c->sent;
2056 conn = c;
2059 if (hci_conn_num(hdev, type) == num)
2060 break;
2063 if (conn) {
2064 int cnt, q;
2066 switch (conn->type) {
2067 case ACL_LINK:
2068 cnt = hdev->acl_cnt;
2069 break;
2070 case SCO_LINK:
2071 case ESCO_LINK:
2072 cnt = hdev->sco_cnt;
2073 break;
2074 case LE_LINK:
2075 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2076 break;
2077 default:
2078 cnt = 0;
2079 BT_ERR("Unknown link type");
2082 q = cnt / num;
2083 *quote = q ? q : 1;
2084 } else
2085 *quote = 0;
2087 BT_DBG("conn %p quote %d", conn, *quote);
2088 return conn;
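/* Worked example for the quota computed above: with hdev->acl_cnt == 8
 * and three ACL connections holding queued data, the connection with the
 * fewest outstanding frames wins and gets a quote of 8 / 3 = 2 frames
 * for this scheduling round; the quote never drops below 1. */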
2091 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2093 struct hci_conn_hash *h = &hdev->conn_hash;
2094 struct list_head *p;
2095 struct hci_conn *c;
2097 BT_ERR("%s link tx timeout", hdev->name);
2099 /* Kill stalled connections */
2100 list_for_each(p, &h->list) {
2101 c = list_entry(p, struct hci_conn, list);
2102 if (c->type == type && c->sent) {
2103 BT_ERR("%s killing stalled connection %s",
2104 hdev->name, batostr(&c->dst));
2105 hci_acl_disconn(c, 0x13);
2110 static inline void hci_sched_acl(struct hci_dev *hdev)
2112 struct hci_conn *conn;
2113 struct sk_buff *skb;
2114 int quote;
2116 BT_DBG("%s", hdev->name);
2118 if (!hci_conn_num(hdev, ACL_LINK))
2119 return;
2121 if (!test_bit(HCI_RAW, &hdev->flags)) {
2122 /* ACL tx timeout must be longer than maximum
2123 * link supervision timeout (40.9 seconds) */
2124 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2125 hci_link_tx_to(hdev, ACL_LINK);
2128 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2129 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2130 BT_DBG("skb %p len %d", skb, skb->len);
2132 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2134 hci_send_frame(skb);
2135 hdev->acl_last_tx = jiffies;
2137 hdev->acl_cnt--;
2138 conn->sent++;
2143 /* Schedule SCO */
2144 static inline void hci_sched_sco(struct hci_dev *hdev)
2146 struct hci_conn *conn;
2147 struct sk_buff *skb;
2148 int quote;
2150 BT_DBG("%s", hdev->name);
2152 if (!hci_conn_num(hdev, SCO_LINK))
2153 return;
2155 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2156 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2157 BT_DBG("skb %p len %d", skb, skb->len);
2158 hci_send_frame(skb);
2160 conn->sent++;
2161 if (conn->sent == ~0)
2162 conn->sent = 0;
2167 static inline void hci_sched_esco(struct hci_dev *hdev)
2169 struct hci_conn *conn;
2170 struct sk_buff *skb;
2171 int quote;
2173 BT_DBG("%s", hdev->name);
2175 if (!hci_conn_num(hdev, ESCO_LINK))
2176 return;
2178 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2179 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2180 BT_DBG("skb %p len %d", skb, skb->len);
2181 hci_send_frame(skb);
2183 conn->sent++;
2184 if (conn->sent == ~0)
2185 conn->sent = 0;
2190 static inline void hci_sched_le(struct hci_dev *hdev)
2192 struct hci_conn *conn;
2193 struct sk_buff *skb;
2194 int quote, cnt;
2196 BT_DBG("%s", hdev->name);
2198 if (!hci_conn_num(hdev, LE_LINK))
2199 return;
2201 if (!test_bit(HCI_RAW, &hdev->flags)) {
2202 /* LE tx timeout must be longer than maximum
2203 * link supervision timeout (40.9 seconds) */
2204 if (!hdev->le_cnt && hdev->le_pkts &&
2205 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2206 hci_link_tx_to(hdev, LE_LINK);
2209 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2210 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
2211 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2212 BT_DBG("skb %p len %d", skb, skb->len);
2214 hci_send_frame(skb);
2215 hdev->le_last_tx = jiffies;
2217 cnt--;
2218 conn->sent++;
2221 if (hdev->le_pkts)
2222 hdev->le_cnt = cnt;
2223 else
2224 hdev->acl_cnt = cnt;
2227 static void hci_tx_task(unsigned long arg)
2229 struct hci_dev *hdev = (struct hci_dev *) arg;
2230 struct sk_buff *skb;
2232 read_lock(&hci_task_lock);
2234 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2235 hdev->sco_cnt, hdev->le_cnt);
2237 /* Schedule queues and send stuff to HCI driver */
2239 hci_sched_acl(hdev);
2241 hci_sched_sco(hdev);
2243 hci_sched_esco(hdev);
2245 hci_sched_le(hdev);
2247 /* Send next queued raw (unknown type) packet */
2248 while ((skb = skb_dequeue(&hdev->raw_q)))
2249 hci_send_frame(skb);
2251 read_unlock(&hci_task_lock);
2254 /* ----- HCI RX task (incoming data processing) ----- */
2256 /* ACL data packet */
2257 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2259 struct hci_acl_hdr *hdr = (void *) skb->data;
2260 struct hci_conn *conn;
2261 __u16 handle, flags;
2263 skb_pull(skb, HCI_ACL_HDR_SIZE);
2265 handle = __le16_to_cpu(hdr->handle);
2266 flags = hci_flags(handle);
2267 handle = hci_handle(handle);
2269 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2271 hdev->stat.acl_rx++;
2273 hci_dev_lock(hdev);
2274 conn = hci_conn_hash_lookup_handle(hdev, handle);
2275 hci_dev_unlock(hdev);
2277 if (conn) {
2278 register struct hci_proto *hp;
2280 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2282 /* Send to upper protocol */
2283 hp = hci_proto[HCI_PROTO_L2CAP];
2284 if (hp && hp->recv_acldata) {
2285 hp->recv_acldata(conn, skb, flags);
2286 return;
2288 } else {
2289 BT_ERR("%s ACL packet for unknown connection handle %d",
2290 hdev->name, handle);
2293 kfree_skb(skb);
2296 /* SCO data packet */
2297 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2299 struct hci_sco_hdr *hdr = (void *) skb->data;
2300 struct hci_conn *conn;
2301 __u16 handle;
2303 skb_pull(skb, HCI_SCO_HDR_SIZE);
2305 handle = __le16_to_cpu(hdr->handle);
2307 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2309 hdev->stat.sco_rx++;
2311 hci_dev_lock(hdev);
2312 conn = hci_conn_hash_lookup_handle(hdev, handle);
2313 hci_dev_unlock(hdev);
2315 if (conn) {
2316 register struct hci_proto *hp;
2318 /* Send to upper protocol */
2319 hp = hci_proto[HCI_PROTO_SCO];
2320 if (hp && hp->recv_scodata) {
2321 hp->recv_scodata(conn, skb);
2322 return;
2324 } else {
2325 BT_ERR("%s SCO packet for unknown connection handle %d",
2326 hdev->name, handle);
2329 kfree_skb(skb);
2332 static void hci_rx_task(unsigned long arg)
2334 struct hci_dev *hdev = (struct hci_dev *) arg;
2335 struct sk_buff *skb;
2337 BT_DBG("%s", hdev->name);
2339 read_lock(&hci_task_lock);
2341 while ((skb = skb_dequeue(&hdev->rx_q))) {
2342 if (atomic_read(&hdev->promisc)) {
2343 /* Send copy to the sockets */
2344 hci_send_to_sock(hdev, skb, NULL);
2347 if (test_bit(HCI_RAW, &hdev->flags)) {
2348 kfree_skb(skb);
2349 continue;
2352 if (test_bit(HCI_INIT, &hdev->flags)) {
2353 /* Don't process data packets in this state. */
2354 switch (bt_cb(skb)->pkt_type) {
2355 case HCI_ACLDATA_PKT:
2356 case HCI_SCODATA_PKT:
2357 kfree_skb(skb);
2358 continue;
2362 /* Process frame */
2363 switch (bt_cb(skb)->pkt_type) {
2364 case HCI_EVENT_PKT:
2365 hci_event_packet(hdev, skb);
2366 break;
2368 case HCI_ACLDATA_PKT:
2369 BT_DBG("%s ACL data packet", hdev->name);
2370 hci_acldata_packet(hdev, skb);
2371 break;
2373 case HCI_SCODATA_PKT:
2374 BT_DBG("%s SCO data packet", hdev->name);
2375 hci_scodata_packet(hdev, skb);
2376 break;
2378 default:
2379 kfree_skb(skb);
2380 break;
2384 read_unlock(&hci_task_lock);
2387 static void hci_cmd_task(unsigned long arg)
2389 struct hci_dev *hdev = (struct hci_dev *) arg;
2390 struct sk_buff *skb;
2392 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2394 /* Send queued commands */
2395 if (atomic_read(&hdev->cmd_cnt)) {
2396 skb = skb_dequeue(&hdev->cmd_q);
2397 if (!skb)
2398 return;
2400 kfree_skb(hdev->sent_cmd);
2402 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2403 if (hdev->sent_cmd) {
2404 atomic_dec(&hdev->cmd_cnt);
2405 hci_send_frame(skb);
2406 if (test_bit(HCI_RESET, &hdev->flags))
2407 del_timer(&hdev->cmd_timer);
2408 else
2409 mod_timer(&hdev->cmd_timer,
2410 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2411 } else {
2412 skb_queue_head(&hdev->cmd_q, skb);
2413 tasklet_schedule(&hdev->cmd_task);