net/bluetooth/hci_core.c — Bluetooth HCI core.
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/interrupt.h>
41 #include <linux/notifier.h>
42 #include <linux/rfkill.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
/* Forward declarations for the tasklet handlers and the notifier
 * helper defined later in this file. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

/* Serializes the RX/TX/cmd tasklets against protocol (un)registration. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols: fixed-size slot table indexed by hci_proto->id */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

/* Register a notifier block for HCI device events. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

/* Remove a previously registered HCI event notifier. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

/* Broadcast @event for @hdev to all registered notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

/* Complete the currently pending synchronous request with @result
 * (an HCI status code) and wake the waiter in __hci_request(). */
void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Abort the currently pending synchronous request with errno @err
 * and wake the waiter in __hci_request(). */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion.
 *
 * Caller holds the request lock.  @req queues the HCI command(s);
 * completion is signalled via hci_req_complete()/hci_req_cancel(),
 * which set req_status and wake req_wait_q.
 * Returns 0 on success or a negative errno (cancel/timeout/signal).
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Join the wait queue and mark ourselves interruptible BEFORE
	 * firing the request, so an immediate completion is not lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to an errno. */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno. */
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
/* Like __hci_request() but takes the request lock itself and refuses
 * to run unless the device is up.  Returns 0 or a negative errno. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
/* Request callback: queue an HCI_Reset command. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
182 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
184 struct sk_buff *skb;
185 __le16 param;
186 __u8 flt_type;
188 BT_DBG("%s %ld", hdev->name, opt);
190 /* Driver initialization */
192 /* Special commands */
193 while ((skb = skb_dequeue(&hdev->driver_init))) {
194 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
195 skb->dev = (void *) hdev;
197 skb_queue_tail(&hdev->cmd_q, skb);
198 tasklet_schedule(&hdev->cmd_task);
200 skb_queue_purge(&hdev->driver_init);
202 /* Mandatory initialization */
204 /* Reset */
205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
206 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
208 /* Read Local Supported Features */
209 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
211 /* Read Local Version */
212 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
214 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
215 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
217 #if 0
218 /* Host buffer size */
220 struct hci_cp_host_buffer_size cp;
221 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
222 cp.sco_mtu = HCI_MAX_SCO_SIZE;
223 cp.acl_max_pkt = cpu_to_le16(0xffff);
224 cp.sco_max_pkt = cpu_to_le16(0xffff);
225 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
227 #endif
229 /* Read BD Address */
230 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
232 /* Read Class of Device */
233 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
235 /* Read Local Name */
236 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
238 /* Read Voice Setting */
239 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
241 /* Optional initialization */
243 /* Clear Event Filters */
244 flt_type = HCI_FLT_CLEAR_ALL;
245 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
247 /* Page timeout ~20 secs */
248 param = cpu_to_le16(0x8000);
249 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
251 /* Connection accept timeout ~20 secs */
252 param = cpu_to_le16(0x7d00);
253 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
/* Request callback: write the inquiry/page scan-enable bitmask. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

/* Request callback: write the authentication-enable flag. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

/* Request callback: write the encryption mode. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Request callback: write the default link policy. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
296 /* Get HCI device by index.
297 * Device is held on return. */
298 struct hci_dev *hci_dev_get(int index)
300 struct hci_dev *hdev = NULL;
301 struct list_head *p;
303 BT_DBG("%d", index);
305 if (index < 0)
306 return NULL;
308 read_lock(&hci_dev_list_lock);
309 list_for_each(p, &hci_dev_list) {
310 struct hci_dev *d = list_entry(p, struct hci_dev, list);
311 if (d->id == index) {
312 hdev = hci_dev_hold(d);
313 break;
316 read_unlock(&hci_dev_list_lock);
317 return hdev;
/* ---- Inquiry support ---- */

/* Free every entry in the inquiry cache.  Callers in this file hold
 * the device lock (hci_dev_lock_bh) around this. */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	/* Detach the list first, then walk and free the old entries. */
	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
335 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
337 struct inquiry_cache *cache = &hdev->inq_cache;
338 struct inquiry_entry *e;
340 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
342 for (e = cache->list; e; e = e->next)
343 if (!bacmp(&e->data.bdaddr, bdaddr))
344 break;
345 return e;
/* Insert or refresh the inquiry-cache entry for @data->bdaddr.
 * New entries are prepended; existing entries are overwritten in
 * place.  Allocation failure is ignored — the cache is best-effort. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;

		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
/* Copy up to @num cached entries into @buf as consecutive
 * struct inquiry_info records.  Returns the number copied.
 * Runs under the device lock and must not sleep (see hci_inquiry). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
/* Request callback: start an inquiry with the parameters of the
 * struct hci_inquiry_req passed via @opt.  No-op when an inquiry is
 * already running. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
/* HCIINQUIRY ioctl: run an inquiry (unless fresh cached results can
 * serve the request) and copy the results to user space.
 * Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	/* Re-inquire when the cache is stale, empty, or the caller
	 * explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* Budget ~2 s of wall time per requested inquiry-length unit. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the entries. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

/* HCIDEVUP: bring the device up and, unless it is a raw device, run
 * the init request sequence.  Returns 0 or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up while rfkill-blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		/* HCI_INIT lets command traffic flow before HCI_UP is set. */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
/* Shut the device down: cancel any pending request, kill the
 * tasklets, flush caches/queues, reset the controller (non-raw) and
 * call the driver's close hook.  Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: nothing to do. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open(). */
	hci_dev_put(hdev);
	return 0;
}
604 int hci_dev_close(__u16 dev)
606 struct hci_dev *hdev;
607 int err;
609 if (!(hdev = hci_dev_get(dev)))
610 return -ENODEV;
611 err = hci_dev_do_close(hdev);
612 hci_dev_put(hdev);
613 return err;
/* HCIDEVRESET ioctl: drop all queued traffic, flush caches and issue
 * an HCI_Reset (non-raw devices).  Returns 0 or a negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet quiescent while its queues are purged. */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore initial buffer credit state. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
656 int hci_dev_reset_stat(__u16 dev)
658 struct hci_dev *hdev;
659 int ret = 0;
661 if (!(hdev = hci_dev_get(dev)))
662 return -ENODEV;
664 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
666 hci_dev_put(hdev);
668 return ret;
/* Dispatcher for the simple HCISET* ioctls: copies a struct
 * hci_dev_req from user space and applies the requested setting.
 * Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* NOTE(review): dev_opt packs two 16-bit values read via a
		 * pointer cast; which half is mtu vs pkts depends on host
		 * endianness — confirm against the hcitool/user-space ABI. */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
/* HCIGETDEVLIST ioctl: return the id and flags of up to dev_num
 * registered devices.  Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel-side buffer stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the requested
 * device and copy it back to user space. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	/* NOTE(review): plain strcpy assumes di.name is at least as large
	 * as hdev->name ("hci%d") — confirm the two field sizes match. */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

/* rfkill callback: close the device when the transmitter becomes
 * blocked; unblocking is a no-op (user space brings it back up). */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Drivers may queue setup commands here before registering;
	 * hci_init_req() drains this queue on open. */
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
						hdev->type, hdev->owner);

	/* The driver must supply the mandatory callbacks. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id: the list is kept ordered by
	 * id, so the first gap — or the end — is the slot to use. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Default packet types, link mode and sniff intervals. */
	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* One reassembly slot per packet type (see __reassembly()). */
	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	/* rfkill support is best-effort: a registration failure leaves
	 * hdev->rfkill NULL and the device still usable. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets. */
	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Drop the registration reference taken in hci_register_dev(). */
	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device: only notifies listeners, no controller I/O. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device: only notifies listeners, no controller I/O. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	/* Only accept frames while the device is up or initializing. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
/* Receive packet type fragment */

/* Per-packet-type reassembly slot; types start at HCI_ACLDATA_PKT (2). */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

/* Reassemble a byte stream of fragments into complete HCI packets and
 * hand each finished packet to hci_recv_frame().  The full packet
 * header must be contained in the first fragment so the total length
 * can be read.  Returns 0 on success or a negative errno. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame: the total length comes from
			 * the packet-type-specific header. */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			/* Track the bytes still missing in the skb's
			 * control buffer. */
			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;	/* slot already taken */

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;	/* nothing registered in this slot */

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
/* Add @cb to the global HCI callback list. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

/* Remove @cb from the global HCI callback list. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
/* Hand a fully built frame to the driver, mirroring it to any
 * promiscuous (monitor) sockets first.  Consumes @skb. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */

/* Build an HCI command packet (@opcode plus @plen bytes of @param)
 * and queue it for the command tasklet.  Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1228 /* Get data from the previously sent command */
1229 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1231 struct hci_command_hdr *hdr;
1233 if (!hdev->sent_cmd)
1234 return NULL;
1236 hdr = (void *) hdev->sent_cmd->data;
1238 if (hdr->opcode != cpu_to_le16(opcode))
1239 return NULL;
1241 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1243 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Send ACL data */

/* Prepend an ACL header (packed handle+flags, data length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
/* Queue ACL data on @conn (fragment list included) and kick the TX
 * tasklet.  The head skb is tagged ACL_START, continuations ACL_CONT. */
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically so the TX tasklet never
		 * sees a partially queued packet. */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* No fragmentation for SCO: reject oversized payloads. */
	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of @type with queued data and the fewest
 * in-flight packets; *quote gets its fair share of the free
 * controller buffer slots (0 when nothing is sendable). */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		/* Skip wrong type, empty queues and non-active states. */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		/* Split the available credits evenly, minimum one. */
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
/* ACL tx timeout handler: the controller stopped returning buffer
 * credits, so disconnect every ACL link with unacked packets. */
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
					hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
1396 static inline void hci_sched_acl(struct hci_dev *hdev)
1398 struct hci_conn *conn;
1399 struct sk_buff *skb;
1400 int quote;
1402 BT_DBG("%s", hdev->name);
1404 if (!test_bit(HCI_RAW, &hdev->flags)) {
1405 /* ACL tx timeout must be longer than maximum
1406 * link supervision timeout (40.9 seconds) */
1407 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1408 hci_acl_tx_to(hdev);
1411 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1412 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1413 BT_DBG("skb %p len %d", skb, skb->len);
1415 hci_conn_enter_active_mode(conn);
1417 hci_send_frame(skb);
1418 hdev->acl_last_tx = jiffies;
1420 hdev->acl_cnt--;
1421 conn->sent++;
1426 /* Schedule SCO */
1427 static inline void hci_sched_sco(struct hci_dev *hdev)
1429 struct hci_conn *conn;
1430 struct sk_buff *skb;
1431 int quote;
1433 BT_DBG("%s", hdev->name);
1435 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1436 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1437 BT_DBG("skb %p len %d", skb, skb->len);
1438 hci_send_frame(skb);
1440 conn->sent++;
1441 if (conn->sent == ~0)
1442 conn->sent = 0;
1447 static inline void hci_sched_esco(struct hci_dev *hdev)
1449 struct hci_conn *conn;
1450 struct sk_buff *skb;
1451 int quote;
1453 BT_DBG("%s", hdev->name);
1455 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1456 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1457 BT_DBG("skb %p len %d", skb, skb->len);
1458 hci_send_frame(skb);
1460 conn->sent++;
1461 if (conn->sent == ~0)
1462 conn->sent = 0;
1467 static void hci_tx_task(unsigned long arg)
1469 struct hci_dev *hdev = (struct hci_dev *) arg;
1470 struct sk_buff *skb;
1472 read_lock(&hci_task_lock);
1474 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1476 /* Schedule queues and send stuff to HCI driver */
1478 hci_sched_acl(hdev);
1480 hci_sched_sco(hdev);
1482 hci_sched_esco(hdev);
1484 /* Send next queued raw (unknown type) packet */
1485 while ((skb = skb_dequeue(&hdev->raw_q)))
1486 hci_send_frame(skb);
1488 read_unlock(&hci_task_lock);
1491 /* ----- HCI RX task (incoming data proccessing) ----- */
1493 /* ACL data packet */
1494 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1496 struct hci_acl_hdr *hdr = (void *) skb->data;
1497 struct hci_conn *conn;
1498 __u16 handle, flags;
1500 skb_pull(skb, HCI_ACL_HDR_SIZE);
1502 handle = __le16_to_cpu(hdr->handle);
1503 flags = hci_flags(handle);
1504 handle = hci_handle(handle);
1506 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1508 hdev->stat.acl_rx++;
1510 hci_dev_lock(hdev);
1511 conn = hci_conn_hash_lookup_handle(hdev, handle);
1512 hci_dev_unlock(hdev);
1514 if (conn) {
1515 register struct hci_proto *hp;
1517 hci_conn_enter_active_mode(conn);
1519 /* Send to upper protocol */
1520 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1521 hp->recv_acldata(conn, skb, flags);
1522 return;
1524 } else {
1525 BT_ERR("%s ACL packet for unknown connection handle %d",
1526 hdev->name, handle);
1529 kfree_skb(skb);
1532 /* SCO data packet */
1533 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1535 struct hci_sco_hdr *hdr = (void *) skb->data;
1536 struct hci_conn *conn;
1537 __u16 handle;
1539 skb_pull(skb, HCI_SCO_HDR_SIZE);
1541 handle = __le16_to_cpu(hdr->handle);
1543 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1545 hdev->stat.sco_rx++;
1547 hci_dev_lock(hdev);
1548 conn = hci_conn_hash_lookup_handle(hdev, handle);
1549 hci_dev_unlock(hdev);
1551 if (conn) {
1552 register struct hci_proto *hp;
1554 /* Send to upper protocol */
1555 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1556 hp->recv_scodata(conn, skb);
1557 return;
1559 } else {
1560 BT_ERR("%s SCO packet for unknown connection handle %d",
1561 hdev->name, handle);
1564 kfree_skb(skb);
1567 static void hci_rx_task(unsigned long arg)
1569 struct hci_dev *hdev = (struct hci_dev *) arg;
1570 struct sk_buff *skb;
1572 BT_DBG("%s", hdev->name);
1574 read_lock(&hci_task_lock);
1576 while ((skb = skb_dequeue(&hdev->rx_q))) {
1577 if (atomic_read(&hdev->promisc)) {
1578 /* Send copy to the sockets */
1579 hci_send_to_sock(hdev, skb);
1582 if (test_bit(HCI_RAW, &hdev->flags)) {
1583 kfree_skb(skb);
1584 continue;
1587 if (test_bit(HCI_INIT, &hdev->flags)) {
1588 /* Don't process data packets in this states. */
1589 switch (bt_cb(skb)->pkt_type) {
1590 case HCI_ACLDATA_PKT:
1591 case HCI_SCODATA_PKT:
1592 kfree_skb(skb);
1593 continue;
1597 /* Process frame */
1598 switch (bt_cb(skb)->pkt_type) {
1599 case HCI_EVENT_PKT:
1600 hci_event_packet(hdev, skb);
1601 break;
1603 case HCI_ACLDATA_PKT:
1604 BT_DBG("%s ACL data packet", hdev->name);
1605 hci_acldata_packet(hdev, skb);
1606 break;
1608 case HCI_SCODATA_PKT:
1609 BT_DBG("%s SCO data packet", hdev->name);
1610 hci_scodata_packet(hdev, skb);
1611 break;
1613 default:
1614 kfree_skb(skb);
1615 break;
1619 read_unlock(&hci_task_lock);
1622 static void hci_cmd_task(unsigned long arg)
1624 struct hci_dev *hdev = (struct hci_dev *) arg;
1625 struct sk_buff *skb;
1627 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1629 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1630 BT_ERR("%s command tx timeout", hdev->name);
1631 atomic_set(&hdev->cmd_cnt, 1);
1634 /* Send queued commands */
1635 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1636 kfree_skb(hdev->sent_cmd);
1638 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1639 atomic_dec(&hdev->cmd_cnt);
1640 hci_send_frame(skb);
1641 hdev->cmd_last_tx = jiffies;
1642 } else {
1643 skb_queue_head(&hdev->cmd_q, skb);
1644 tasklet_schedule(&hdev->cmd_task);