/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000
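/* enable_hs (declared just below, module parameter at the bottom of this
 * file): when false, hci_dev_open() treats every non BR/EDR controller,
 * e.g. an AMP radio, as a raw device. */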
bool enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
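/* Synchronous request machinery: __hci_request() parks the caller on
 * hdev->req_wait_q while the req callback queues HCI commands, and
 * hci_req_complete() (driven from the event path) flips req_status to
 * HCI_REQ_DONE and wakes it. Callers normally go through hci_request()
 * below, which serializes requests with hdev->req_lock; a typical
 * in-file use looks like:
 *
 *	err = hci_request(hdev, hci_scan_req, dr.dev_opt,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */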
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev,
			      void (*req)(struct hci_dev *hdev, unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) ||
	    ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	 * enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
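/* Power lifecycle: hci_register_dev() sets HCI_AUTO_OFF and HCI_SETUP and
 * schedules hci_power_on(). Unless something (e.g. the mgmt interface)
 * clears HCI_AUTO_OFF in the meantime, hci_power_off() runs
 * AUTO_OFF_TIMEOUT milliseconds later and closes the device again. */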
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
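/* Decide whether a link key is worth storing persistently. The
 * auth_type/remote_auth comparisons below rely on the SSP authentication
 * requirements encoding: 0x00/0x01 no bonding, 0x02/0x03 dedicated
 * bonding, 0x04/0x05 general bonding (odd values request MITM
 * protection). */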
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			      u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
		    (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) &&
	    old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
		u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
		      struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
	       batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
	       hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
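/* Reassemble one HCI packet from a driver that delivers data in arbitrary
 * chunks. Per-slot state lives in hdev->reassembly[index]: scb->expect
 * counts the bytes still missing, first for the packet header and then
 * for the payload length announced by that header. Returns how many input
 * bytes were left unconsumed, or a negative error. */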
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
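/* Stream transports (e.g. UART/H4-style drivers) deliver one byte stream
 * in which every HCI packet is preceded by a single packet-type byte.
 * hci_recv_stream_fragment() peels off that type byte and pushes the rest
 * through hci_reassembly() using one shared slot. */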
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
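/* The 16-bit ACL handle field packs a 12-bit connection handle together
 * with 2-bit packet boundary (PB) and broadcast (BC) flags; that is what
 * hci_handle_pack() assembles below. */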
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
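/* Pick the connection of the given type with the fewest packets in
 * flight and grant it a fair share of the controller's free buffers
 * (cnt / num, at least 1), so one busy link cannot starve the others. */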
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
					    int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
					     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
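/* After a TX round, promote the priority of channels that queued data but
 * got no service (chan->sent == 0), so low-priority traffic is not
 * starved indefinitely by higher-priority channels. */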
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
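/* Command flow control: hdev->cmd_cnt holds the command credits granted
 * by the controller (replenished by the Command Complete/Status event
 * handlers). A clone of every command is kept in hdev->sent_cmd so
 * hci_sent_cmd_data() can inspect it, and cmd_timer fires if the
 * controller stops answering. */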
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");