net/bluetooth/hci_core.c (jz_linux_2.6.24.3.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
25 /* Bluetooth HCI core. */
27 #include <linux/module.h>
28 #include <linux/kmod.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/skbuff.h>
39 #include <linux/interrupt.h>
40 #include <linux/notifier.h>
41 #include <net/sock.h>
43 #include <asm/system.h>
44 #include <asm/uaccess.h>
45 #include <asm/unaligned.h>
47 #include <net/bluetooth/bluetooth.h>
48 #include <net/bluetooth/hci_core.h>
50 #ifndef CONFIG_BT_HCI_CORE_DEBUG
51 #undef BT_DBG
52 #define BT_DBG(D...)
53 #endif
55 static void hci_cmd_task(unsigned long arg);
56 static void hci_rx_task(unsigned long arg);
57 static void hci_tx_task(unsigned long arg);
58 static void hci_notify(struct hci_dev *hdev, int event);
60 static DEFINE_RWLOCK(hci_task_lock);
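/* Core processing runs in three per-device tasklets (rx, tx, cmd).
 * hci_task_lock is taken for reading by the rx/tx tasklets and for
 * writing when protocols are (un)registered, so hci_proto[] never
 * changes underneath a running task. */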
62 /* HCI device list */
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
70 /* HCI protocols */
71 #define HCI_MAX_PROTO 2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77 /* ---- HCI notifications ---- */
79 int hci_register_notifier(struct notifier_block *nb)
81 return atomic_notifier_chain_register(&hci_notifier, nb);
84 int hci_unregister_notifier(struct notifier_block *nb)
86 return atomic_notifier_chain_unregister(&hci_notifier, nb);
89 static void hci_notify(struct hci_dev *hdev, int event)
91 atomic_notifier_call_chain(&hci_notifier, event, hdev);
94 /* ---- HCI requests ---- */
96 void hci_req_complete(struct hci_dev *hdev, int result)
98 BT_DBG("%s result 0x%2.2x", hdev->name, result);
100 if (hdev->req_status == HCI_REQ_PEND) {
101 hdev->req_result = result;
102 hdev->req_status = HCI_REQ_DONE;
103 wake_up_interruptible(&hdev->req_wait_q);
107 static void hci_req_cancel(struct hci_dev *hdev, int err)
109 BT_DBG("%s err 0x%2.2x", hdev->name, err);
111 if (hdev->req_status == HCI_REQ_PEND) {
112 hdev->req_result = err;
113 hdev->req_status = HCI_REQ_CANCELED;
114 wake_up_interruptible(&hdev->req_wait_q);
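/* Synchronous request machinery: __hci_request() calls req() to queue the
 * HCI commands, then sleeps on req_wait_q until hci_req_complete() or
 * hci_req_cancel() updates req_status, or the timeout expires. */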
118 /* Execute request and wait for completion. */
119 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
120 unsigned long opt, __u32 timeout)
122 DECLARE_WAITQUEUE(wait, current);
123 int err = 0;
125 BT_DBG("%s start", hdev->name);
127 hdev->req_status = HCI_REQ_PEND;
129 add_wait_queue(&hdev->req_wait_q, &wait);
130 set_current_state(TASK_INTERRUPTIBLE);
132 req(hdev, opt);
133 schedule_timeout(timeout);
135 remove_wait_queue(&hdev->req_wait_q, &wait);
137 if (signal_pending(current))
138 return -EINTR;
140 switch (hdev->req_status) {
141 case HCI_REQ_DONE:
142 err = -bt_err(hdev->req_result);
143 break;
145 case HCI_REQ_CANCELED:
146 err = -hdev->req_result;
147 break;
149 default:
150 err = -ETIMEDOUT;
151 break;
154 hdev->req_status = hdev->req_result = 0;
156 BT_DBG("%s end: err %d", hdev->name, err);
158 return err;
161 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
162 unsigned long opt, __u32 timeout)
164 int ret;
166 /* Serialize all requests */
167 hci_req_lock(hdev);
168 ret = __hci_request(hdev, req, opt, timeout);
169 hci_req_unlock(hdev);
171 return ret;
174 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
176 BT_DBG("%s %ld", hdev->name, opt);
178 /* Reset device */
179 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
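/* hci_init_req() queues any driver-supplied init commands followed by the
 * mandatory controller bring-up sequence (reset, feature/version/buffer
 * reads, BD address, etc.). Once the controller has answered, the waiting
 * __hci_request() caller is woken from the event handling path via
 * hci_req_complete(). */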
182 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
184 struct sk_buff *skb;
185 __le16 param;
186 __u8 flt_type;
188 BT_DBG("%s %ld", hdev->name, opt);
190 /* Driver initialization */
192 /* Special commands */
193 while ((skb = skb_dequeue(&hdev->driver_init))) {
194 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
195 skb->dev = (void *) hdev;
196 skb_queue_tail(&hdev->cmd_q, skb);
197 hci_sched_cmd(hdev);
199 skb_queue_purge(&hdev->driver_init);
201 /* Mandatory initialization */
203 /* Reset */
204 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
205 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
207 /* Read Local Supported Features */
208 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
210 /* Read Local Version */
211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
213 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
214 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
216 #if 0
217 /* Host buffer size */
219 struct hci_cp_host_buffer_size cp;
220 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
221 cp.sco_mtu = HCI_MAX_SCO_SIZE;
222 cp.acl_max_pkt = cpu_to_le16(0xffff);
223 cp.sco_max_pkt = cpu_to_le16(0xffff);
224 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
226 #endif
228 /* Read BD Address */
229 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
231 /* Read Class of Device */
232 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
234 /* Read Local Name */
235 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
237 /* Read Voice Setting */
238 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
240 /* Optional initialization */
242 /* Clear Event Filters */
243 flt_type = HCI_FLT_CLEAR_ALL;
244 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
246 /* Page timeout ~20 secs */
247 param = cpu_to_le16(0x8000);
248 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
250 /* Connection accept timeout ~20 secs */
251 param = cpu_to_le16(0x7d00);
252 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
255 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
257 __u8 scan = opt;
259 BT_DBG("%s %x", hdev->name, scan);
261 /* Inquiry and Page scans */
262 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
265 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
267 __u8 auth = opt;
269 BT_DBG("%s %x", hdev->name, auth);
271 /* Authentication */
272 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
275 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
277 __u8 encrypt = opt;
279 BT_DBG("%s %x", hdev->name, encrypt);
281 	/* Encryption */
282 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
285 /* Get HCI device by index.
286 * Device is held on return. */
287 struct hci_dev *hci_dev_get(int index)
289 struct hci_dev *hdev = NULL;
290 struct list_head *p;
292 BT_DBG("%d", index);
294 if (index < 0)
295 return NULL;
297 read_lock(&hci_dev_list_lock);
298 list_for_each(p, &hci_dev_list) {
299 struct hci_dev *d = list_entry(p, struct hci_dev, list);
300 if (d->id == index) {
301 hdev = hci_dev_hold(d);
302 break;
305 read_unlock(&hci_dev_list_lock);
306 return hdev;
309 /* ---- Inquiry support ---- */
310 static void inquiry_cache_flush(struct hci_dev *hdev)
312 struct inquiry_cache *cache = &hdev->inq_cache;
313 struct inquiry_entry *next = cache->list, *e;
315 BT_DBG("cache %p", cache);
317 cache->list = NULL;
318 while ((e = next)) {
319 next = e->next;
320 kfree(e);
324 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
326 struct inquiry_cache *cache = &hdev->inq_cache;
327 struct inquiry_entry *e;
329 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
331 for (e = cache->list; e; e = e->next)
332 if (!bacmp(&e->data.bdaddr, bdaddr))
333 break;
334 return e;
337 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
339 struct inquiry_cache *cache = &hdev->inq_cache;
340 struct inquiry_entry *e;
342 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
344 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
345 /* Entry not in the cache. Add new one. */
346 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
347 return;
348 e->next = cache->list;
349 cache->list = e;
352 memcpy(&e->data, data, sizeof(*data));
353 e->timestamp = jiffies;
354 cache->timestamp = jiffies;
357 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
359 struct inquiry_cache *cache = &hdev->inq_cache;
360 struct inquiry_info *info = (struct inquiry_info *) buf;
361 struct inquiry_entry *e;
362 int copied = 0;
364 for (e = cache->list; e && copied < num; e = e->next, copied++) {
365 struct inquiry_data *data = &e->data;
366 bacpy(&info->bdaddr, &data->bdaddr);
367 info->pscan_rep_mode = data->pscan_rep_mode;
368 info->pscan_period_mode = data->pscan_period_mode;
369 info->pscan_mode = data->pscan_mode;
370 memcpy(info->dev_class, data->dev_class, 3);
371 info->clock_offset = data->clock_offset;
372 info++;
375 BT_DBG("cache %p, copied %d", cache, copied);
376 return copied;
379 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
381 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
382 struct hci_cp_inquiry cp;
384 BT_DBG("%s", hdev->name);
386 if (test_bit(HCI_INQUIRY, &hdev->flags))
387 return;
389 /* Start Inquiry */
390 memcpy(&cp.lap, &ir->lap, 3);
391 cp.length = ir->length;
392 cp.num_rsp = ir->num_rsp;
393 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
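/* hci_inquiry() backs the HCIINQUIRY ioctl: flush the inquiry cache if it
 * is stale (or the caller requested a flush), run a fresh inquiry as a
 * synchronous request, then dump the cached responses back to user space. */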
396 int hci_inquiry(void __user *arg)
398 __u8 __user *ptr = arg;
399 struct hci_inquiry_req ir;
400 struct hci_dev *hdev;
401 int err = 0, do_inquiry = 0, max_rsp;
402 long timeo;
403 __u8 *buf;
405 if (copy_from_user(&ir, ptr, sizeof(ir)))
406 return -EFAULT;
408 if (!(hdev = hci_dev_get(ir.dev_id)))
409 return -ENODEV;
411 hci_dev_lock_bh(hdev);
412 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
413 inquiry_cache_empty(hdev) ||
414 ir.flags & IREQ_CACHE_FLUSH) {
415 inquiry_cache_flush(hdev);
416 do_inquiry = 1;
418 hci_dev_unlock_bh(hdev);
420 timeo = ir.length * msecs_to_jiffies(2000);
421 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
422 goto done;
424 	/* For an unlimited number of responses, use a buffer with 255 entries */
425 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
427 	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
428 	 * copy it to user space.
429 	 */
430 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
431 err = -ENOMEM;
432 goto done;
435 hci_dev_lock_bh(hdev);
436 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
437 hci_dev_unlock_bh(hdev);
439 BT_DBG("num_rsp %d", ir.num_rsp);
441 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
442 ptr += sizeof(ir);
443 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
444 ir.num_rsp))
445 err = -EFAULT;
446 } else
447 err = -EFAULT;
449 kfree(buf);
451 done:
452 hci_dev_put(hdev);
453 return err;
456 /* ---- HCI ioctl helpers ---- */
458 int hci_dev_open(__u16 dev)
460 struct hci_dev *hdev;
461 int ret = 0;
463 if (!(hdev = hci_dev_get(dev)))
464 return -ENODEV;
466 BT_DBG("%s %p", hdev->name, hdev);
468 hci_req_lock(hdev);
470 if (test_bit(HCI_UP, &hdev->flags)) {
471 ret = -EALREADY;
472 goto done;
475 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
476 set_bit(HCI_RAW, &hdev->flags);
478 if (hdev->open(hdev)) {
479 ret = -EIO;
480 goto done;
483 if (!test_bit(HCI_RAW, &hdev->flags)) {
484 atomic_set(&hdev->cmd_cnt, 1);
485 set_bit(HCI_INIT, &hdev->flags);
487 //__hci_request(hdev, hci_reset_req, 0, HZ);
488 ret = __hci_request(hdev, hci_init_req, 0,
489 msecs_to_jiffies(HCI_INIT_TIMEOUT));
491 clear_bit(HCI_INIT, &hdev->flags);
494 if (!ret) {
495 hci_dev_hold(hdev);
496 set_bit(HCI_UP, &hdev->flags);
497 hci_notify(hdev, HCI_DEV_UP);
498 } else {
499 /* Init failed, cleanup */
500 tasklet_kill(&hdev->rx_task);
501 tasklet_kill(&hdev->tx_task);
502 tasklet_kill(&hdev->cmd_task);
504 skb_queue_purge(&hdev->cmd_q);
505 skb_queue_purge(&hdev->rx_q);
507 if (hdev->flush)
508 hdev->flush(hdev);
510 if (hdev->sent_cmd) {
511 kfree_skb(hdev->sent_cmd);
512 hdev->sent_cmd = NULL;
515 hdev->close(hdev);
516 hdev->flags = 0;
519 done:
520 hci_req_unlock(hdev);
521 hci_dev_put(hdev);
522 return ret;
525 static int hci_dev_do_close(struct hci_dev *hdev)
527 BT_DBG("%s %p", hdev->name, hdev);
529 hci_req_cancel(hdev, ENODEV);
530 hci_req_lock(hdev);
532 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
533 hci_req_unlock(hdev);
534 return 0;
537 /* Kill RX and TX tasks */
538 tasklet_kill(&hdev->rx_task);
539 tasklet_kill(&hdev->tx_task);
541 hci_dev_lock_bh(hdev);
542 inquiry_cache_flush(hdev);
543 hci_conn_hash_flush(hdev);
544 hci_dev_unlock_bh(hdev);
546 hci_notify(hdev, HCI_DEV_DOWN);
548 if (hdev->flush)
549 hdev->flush(hdev);
551 /* Reset device */
552 skb_queue_purge(&hdev->cmd_q);
553 atomic_set(&hdev->cmd_cnt, 1);
554 if (!test_bit(HCI_RAW, &hdev->flags)) {
555 set_bit(HCI_INIT, &hdev->flags);
556 __hci_request(hdev, hci_reset_req, 0,
557 msecs_to_jiffies(250));
558 clear_bit(HCI_INIT, &hdev->flags);
561 /* Kill cmd task */
562 tasklet_kill(&hdev->cmd_task);
564 /* Drop queues */
565 skb_queue_purge(&hdev->rx_q);
566 skb_queue_purge(&hdev->cmd_q);
567 skb_queue_purge(&hdev->raw_q);
569 /* Drop last sent command */
570 if (hdev->sent_cmd) {
571 kfree_skb(hdev->sent_cmd);
572 hdev->sent_cmd = NULL;
575 /* After this point our queues are empty
576 * and no tasks are scheduled. */
577 hdev->close(hdev);
579 /* Clear flags */
580 hdev->flags = 0;
582 hci_req_unlock(hdev);
584 hci_dev_put(hdev);
585 return 0;
588 int hci_dev_close(__u16 dev)
590 struct hci_dev *hdev;
591 int err;
593 if (!(hdev = hci_dev_get(dev)))
594 return -ENODEV;
595 err = hci_dev_do_close(hdev);
596 hci_dev_put(hdev);
597 return err;
600 int hci_dev_reset(__u16 dev)
602 struct hci_dev *hdev;
603 int ret = 0;
605 if (!(hdev = hci_dev_get(dev)))
606 return -ENODEV;
608 hci_req_lock(hdev);
609 tasklet_disable(&hdev->tx_task);
611 if (!test_bit(HCI_UP, &hdev->flags))
612 goto done;
614 /* Drop queues */
615 skb_queue_purge(&hdev->rx_q);
616 skb_queue_purge(&hdev->cmd_q);
618 hci_dev_lock_bh(hdev);
619 inquiry_cache_flush(hdev);
620 hci_conn_hash_flush(hdev);
621 hci_dev_unlock_bh(hdev);
623 if (hdev->flush)
624 hdev->flush(hdev);
626 atomic_set(&hdev->cmd_cnt, 1);
627 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
629 if (!test_bit(HCI_RAW, &hdev->flags))
630 ret = __hci_request(hdev, hci_reset_req, 0,
631 msecs_to_jiffies(HCI_INIT_TIMEOUT));
633 done:
634 tasklet_enable(&hdev->tx_task);
635 hci_req_unlock(hdev);
636 hci_dev_put(hdev);
637 return ret;
640 int hci_dev_reset_stat(__u16 dev)
642 struct hci_dev *hdev;
643 int ret = 0;
645 if (!(hdev = hci_dev_get(dev)))
646 return -ENODEV;
648 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
650 hci_dev_put(hdev);
652 return ret;
655 int hci_dev_cmd(unsigned int cmd, void __user *arg)
657 struct hci_dev *hdev;
658 struct hci_dev_req dr;
659 int err = 0;
661 if (copy_from_user(&dr, arg, sizeof(dr)))
662 return -EFAULT;
664 if (!(hdev = hci_dev_get(dr.dev_id)))
665 return -ENODEV;
667 switch (cmd) {
668 case HCISETAUTH:
669 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
670 msecs_to_jiffies(HCI_INIT_TIMEOUT));
671 break;
673 case HCISETENCRYPT:
674 if (!lmp_encrypt_capable(hdev)) {
675 err = -EOPNOTSUPP;
676 break;
679 if (!test_bit(HCI_AUTH, &hdev->flags)) {
680 /* Auth must be enabled first */
681 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
682 msecs_to_jiffies(HCI_INIT_TIMEOUT));
683 if (err)
684 break;
687 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
688 msecs_to_jiffies(HCI_INIT_TIMEOUT));
689 break;
691 case HCISETSCAN:
692 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT));
694 break;
696 case HCISETPTYPE:
697 hdev->pkt_type = (__u16) dr.dev_opt;
698 break;
700 case HCISETLINKPOL:
701 hdev->link_policy = (__u16) dr.dev_opt;
702 break;
704 case HCISETLINKMODE:
705 hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
706 break;
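	/* For the two MTU ioctls below, dev_opt packs both values: one 16-bit
	 * half carries the MTU, the other the packet count. */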
708 case HCISETACLMTU:
709 hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1);
710 hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
711 break;
713 case HCISETSCOMTU:
714 hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1);
715 hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
716 break;
718 default:
719 err = -EINVAL;
720 break;
722 hci_dev_put(hdev);
723 return err;
726 int hci_get_dev_list(void __user *arg)
728 struct hci_dev_list_req *dl;
729 struct hci_dev_req *dr;
730 struct list_head *p;
731 int n = 0, size, err;
732 __u16 dev_num;
734 if (get_user(dev_num, (__u16 __user *) arg))
735 return -EFAULT;
737 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
738 return -EINVAL;
740 size = sizeof(*dl) + dev_num * sizeof(*dr);
742 if (!(dl = kmalloc(size, GFP_KERNEL)))
743 return -ENOMEM;
745 dr = dl->dev_req;
747 read_lock_bh(&hci_dev_list_lock);
748 list_for_each(p, &hci_dev_list) {
749 struct hci_dev *hdev;
750 hdev = list_entry(p, struct hci_dev, list);
751 (dr + n)->dev_id = hdev->id;
752 (dr + n)->dev_opt = hdev->flags;
753 if (++n >= dev_num)
754 break;
756 read_unlock_bh(&hci_dev_list_lock);
758 dl->dev_num = n;
759 size = sizeof(*dl) + n * sizeof(*dr);
761 err = copy_to_user(arg, dl, size);
762 kfree(dl);
764 return err ? -EFAULT : 0;
767 int hci_get_dev_info(void __user *arg)
769 struct hci_dev *hdev;
770 struct hci_dev_info di;
771 int err = 0;
773 if (copy_from_user(&di, arg, sizeof(di)))
774 return -EFAULT;
776 if (!(hdev = hci_dev_get(di.dev_id)))
777 return -ENODEV;
779 strcpy(di.name, hdev->name);
780 di.bdaddr = hdev->bdaddr;
781 di.type = hdev->type;
782 di.flags = hdev->flags;
783 di.pkt_type = hdev->pkt_type;
784 di.acl_mtu = hdev->acl_mtu;
785 di.acl_pkts = hdev->acl_pkts;
786 di.sco_mtu = hdev->sco_mtu;
787 di.sco_pkts = hdev->sco_pkts;
788 di.link_policy = hdev->link_policy;
789 di.link_mode = hdev->link_mode;
791 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
792 memcpy(&di.features, &hdev->features, sizeof(di.features));
794 if (copy_to_user(arg, &di, sizeof(di)))
795 err = -EFAULT;
797 hci_dev_put(hdev);
799 return err;
802 /* ---- Interface to HCI drivers ---- */
804 /* Alloc HCI device */
805 struct hci_dev *hci_alloc_dev(void)
807 struct hci_dev *hdev;
809 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
810 if (!hdev)
811 return NULL;
813 skb_queue_head_init(&hdev->driver_init);
815 return hdev;
817 EXPORT_SYMBOL(hci_alloc_dev);
819 /* Free HCI device */
820 void hci_free_dev(struct hci_dev *hdev)
822 skb_queue_purge(&hdev->driver_init);
824 	/* will be freed via the device release function */
825 put_device(&hdev->dev);
827 EXPORT_SYMBOL(hci_free_dev);
829 /* Register HCI device */
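/* The device list is kept sorted by id; the loop below picks the first
 * unused index and inserts the new device in place, so the name is always
 * hciN with the smallest free N. */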
830 int hci_register_dev(struct hci_dev *hdev)
832 struct list_head *head = &hci_dev_list, *p;
833 int i, id = 0;
835 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
837 if (!hdev->open || !hdev->close || !hdev->destruct)
838 return -EINVAL;
840 write_lock_bh(&hci_dev_list_lock);
842 /* Find first available device id */
843 list_for_each(p, &hci_dev_list) {
844 if (list_entry(p, struct hci_dev, list)->id != id)
845 break;
846 head = p; id++;
849 sprintf(hdev->name, "hci%d", id);
850 hdev->id = id;
851 list_add(&hdev->list, head);
853 atomic_set(&hdev->refcnt, 1);
854 spin_lock_init(&hdev->lock);
856 hdev->flags = 0;
857 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
858 hdev->esco_type = (ESCO_HV1);
859 hdev->link_mode = (HCI_LM_ACCEPT);
861 hdev->idle_timeout = 0;
862 hdev->sniff_max_interval = 800;
863 hdev->sniff_min_interval = 80;
865 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
866 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
867 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
869 skb_queue_head_init(&hdev->rx_q);
870 skb_queue_head_init(&hdev->cmd_q);
871 skb_queue_head_init(&hdev->raw_q);
873 for (i = 0; i < 3; i++)
874 hdev->reassembly[i] = NULL;
876 init_waitqueue_head(&hdev->req_wait_q);
877 init_MUTEX(&hdev->req_lock);
879 inquiry_cache_init(hdev);
881 hci_conn_hash_init(hdev);
883 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
885 atomic_set(&hdev->promisc, 0);
887 write_unlock_bh(&hci_dev_list_lock);
889 hci_register_sysfs(hdev);
891 hci_notify(hdev, HCI_DEV_REG);
893 return id;
895 EXPORT_SYMBOL(hci_register_dev);
897 /* Unregister HCI device */
898 int hci_unregister_dev(struct hci_dev *hdev)
900 int i;
902 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
904 hci_unregister_sysfs(hdev);
906 write_lock_bh(&hci_dev_list_lock);
907 list_del(&hdev->list);
908 write_unlock_bh(&hci_dev_list_lock);
910 hci_dev_do_close(hdev);
912 for (i = 0; i < 3; i++)
913 kfree_skb(hdev->reassembly[i]);
915 hci_notify(hdev, HCI_DEV_UNREG);
917 __hci_dev_put(hdev);
919 return 0;
921 EXPORT_SYMBOL(hci_unregister_dev);
923 /* Suspend HCI device */
924 int hci_suspend_dev(struct hci_dev *hdev)
926 hci_notify(hdev, HCI_DEV_SUSPEND);
927 return 0;
929 EXPORT_SYMBOL(hci_suspend_dev);
931 /* Resume HCI device */
932 int hci_resume_dev(struct hci_dev *hdev)
934 hci_notify(hdev, HCI_DEV_RESUME);
935 return 0;
937 EXPORT_SYMBOL(hci_resume_dev);
939 /* Receive packet type fragment */
940 #define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2])
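/* hci_recv_fragment() lets drivers push received data in arbitrary chunks:
 * the first bytes of a frame determine its full length from the packet
 * header, the expected remainder is tracked in skb->cb, and the skb is
 * handed to hci_recv_frame() once the frame is complete. */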
942 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
944 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
945 return -EILSEQ;
947 while (count) {
948 struct sk_buff *skb = __reassembly(hdev, type);
949 struct { int expect; } *scb;
950 int len = 0;
952 if (!skb) {
953 /* Start of the frame */
955 switch (type) {
956 case HCI_EVENT_PKT:
957 if (count >= HCI_EVENT_HDR_SIZE) {
958 struct hci_event_hdr *h = data;
959 len = HCI_EVENT_HDR_SIZE + h->plen;
960 } else
961 return -EILSEQ;
962 break;
964 case HCI_ACLDATA_PKT:
965 if (count >= HCI_ACL_HDR_SIZE) {
966 struct hci_acl_hdr *h = data;
967 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
968 } else
969 return -EILSEQ;
970 break;
972 case HCI_SCODATA_PKT:
973 if (count >= HCI_SCO_HDR_SIZE) {
974 struct hci_sco_hdr *h = data;
975 len = HCI_SCO_HDR_SIZE + h->dlen;
976 } else
977 return -EILSEQ;
978 break;
981 skb = bt_skb_alloc(len, GFP_ATOMIC);
982 if (!skb) {
983 BT_ERR("%s no memory for packet", hdev->name);
984 return -ENOMEM;
987 skb->dev = (void *) hdev;
988 bt_cb(skb)->pkt_type = type;
990 __reassembly(hdev, type) = skb;
992 scb = (void *) skb->cb;
993 scb->expect = len;
994 } else {
995 /* Continuation */
997 scb = (void *) skb->cb;
998 len = scb->expect;
1001 len = min(len, count);
1003 memcpy(skb_put(skb, len), data, len);
1005 scb->expect -= len;
1007 if (scb->expect == 0) {
1008 /* Complete frame */
1010 __reassembly(hdev, type) = NULL;
1012 bt_cb(skb)->pkt_type = type;
1013 hci_recv_frame(skb);
1016 count -= len; data += len;
1019 return 0;
1021 EXPORT_SYMBOL(hci_recv_fragment);
1023 /* ---- Interface to upper protocols ---- */
1025 /* Register/Unregister protocols.
1026 * hci_task_lock is used to ensure that no tasks are running. */
1027 int hci_register_proto(struct hci_proto *hp)
1029 int err = 0;
1031 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1033 if (hp->id >= HCI_MAX_PROTO)
1034 return -EINVAL;
1036 write_lock_bh(&hci_task_lock);
1038 if (!hci_proto[hp->id])
1039 hci_proto[hp->id] = hp;
1040 else
1041 err = -EEXIST;
1043 write_unlock_bh(&hci_task_lock);
1045 return err;
1047 EXPORT_SYMBOL(hci_register_proto);
1049 int hci_unregister_proto(struct hci_proto *hp)
1051 int err = 0;
1053 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1055 if (hp->id >= HCI_MAX_PROTO)
1056 return -EINVAL;
1058 write_lock_bh(&hci_task_lock);
1060 if (hci_proto[hp->id])
1061 hci_proto[hp->id] = NULL;
1062 else
1063 err = -ENOENT;
1065 write_unlock_bh(&hci_task_lock);
1067 return err;
1069 EXPORT_SYMBOL(hci_unregister_proto);
1071 int hci_register_cb(struct hci_cb *cb)
1073 BT_DBG("%p name %s", cb, cb->name);
1075 write_lock_bh(&hci_cb_list_lock);
1076 list_add(&cb->list, &hci_cb_list);
1077 write_unlock_bh(&hci_cb_list_lock);
1079 return 0;
1081 EXPORT_SYMBOL(hci_register_cb);
1083 int hci_unregister_cb(struct hci_cb *cb)
1085 BT_DBG("%p name %s", cb, cb->name);
1087 write_lock_bh(&hci_cb_list_lock);
1088 list_del(&cb->list);
1089 write_unlock_bh(&hci_cb_list_lock);
1091 return 0;
1093 EXPORT_SYMBOL(hci_unregister_cb);
1095 static int hci_send_frame(struct sk_buff *skb)
1097 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1099 if (!hdev) {
1100 kfree_skb(skb);
1101 return -ENODEV;
1104 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1106 if (atomic_read(&hdev->promisc)) {
1107 /* Time stamp */
1108 __net_timestamp(skb);
1110 hci_send_to_sock(hdev, skb);
1113 /* Get rid of skb owner, prior to sending to the driver. */
1114 skb_orphan(skb);
1116 return hdev->send(skb);
1119 /* Send HCI command */
1120 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1122 int len = HCI_COMMAND_HDR_SIZE + plen;
1123 struct hci_command_hdr *hdr;
1124 struct sk_buff *skb;
1126 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1128 skb = bt_skb_alloc(len, GFP_ATOMIC);
1129 if (!skb) {
1130 BT_ERR("%s no memory for command", hdev->name);
1131 return -ENOMEM;
1134 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1135 hdr->opcode = cpu_to_le16(opcode);
1136 hdr->plen = plen;
1138 if (plen)
1139 memcpy(skb_put(skb, plen), param, plen);
1141 BT_DBG("skb len %d", skb->len);
1143 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1144 skb->dev = (void *) hdev;
1145 skb_queue_tail(&hdev->cmd_q, skb);
1146 hci_sched_cmd(hdev);
1148 return 0;
1151 /* Get data from the previously sent command */
1152 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1154 struct hci_command_hdr *hdr;
1156 if (!hdev->sent_cmd)
1157 return NULL;
1159 hdr = (void *) hdev->sent_cmd->data;
1161 if (hdr->opcode != cpu_to_le16(opcode))
1162 return NULL;
1164 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1166 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1169 /* Send ACL data */
1170 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1172 struct hci_acl_hdr *hdr;
1173 int len = skb->len;
1175 skb_push(skb, HCI_ACL_HDR_SIZE);
1176 skb_reset_transport_header(skb);
1177 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1178 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1179 hdr->dlen = cpu_to_le16(len);
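/* For fragmented ACL data the head skb gets an ACL_START header and each
 * skb on its frag_list gets an ACL_CONT header; all fragments are queued
 * to conn->data_q under the queue lock so the TX task never sees a
 * partially queued frame. */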
1182 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1184 struct hci_dev *hdev = conn->hdev;
1185 struct sk_buff *list;
1187 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1189 skb->dev = (void *) hdev;
1190 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1191 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1193 if (!(list = skb_shinfo(skb)->frag_list)) {
1194 /* Non fragmented */
1195 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1197 skb_queue_tail(&conn->data_q, skb);
1198 } else {
1199 /* Fragmented */
1200 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1202 skb_shinfo(skb)->frag_list = NULL;
1204 /* Queue all fragments atomically */
1205 spin_lock_bh(&conn->data_q.lock);
1207 __skb_queue_tail(&conn->data_q, skb);
1208 do {
1209 skb = list; list = list->next;
1211 skb->dev = (void *) hdev;
1212 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1213 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1215 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1217 __skb_queue_tail(&conn->data_q, skb);
1218 } while (list);
1220 spin_unlock_bh(&conn->data_q.lock);
1223 hci_sched_tx(hdev);
1224 return 0;
1226 EXPORT_SYMBOL(hci_send_acl);
1228 /* Send SCO data */
1229 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1231 struct hci_dev *hdev = conn->hdev;
1232 struct hci_sco_hdr hdr;
1234 BT_DBG("%s len %d", hdev->name, skb->len);
1236 if (skb->len > hdev->sco_mtu) {
1237 kfree_skb(skb);
1238 return -EINVAL;
1241 hdr.handle = cpu_to_le16(conn->handle);
1242 hdr.dlen = skb->len;
1244 skb_push(skb, HCI_SCO_HDR_SIZE);
1245 skb_reset_transport_header(skb);
1246 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1248 skb->dev = (void *) hdev;
1249 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1250 skb_queue_tail(&conn->data_q, skb);
1251 hci_sched_tx(hdev);
1252 return 0;
1254 EXPORT_SYMBOL(hci_send_sco);
1256 /* ---- HCI TX task (outgoing data) ---- */
1258 /* HCI Connection scheduler */
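/* hci_low_sent() implements a simple fair scheduler: among connections of
 * the given type that have data queued, pick the one with the fewest
 * packets in flight and grant it a quota of the available controller
 * buffers divided by the number of ready connections. */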
1259 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1261 struct hci_conn_hash *h = &hdev->conn_hash;
1262 struct hci_conn *conn = NULL;
1263 int num = 0, min = ~0;
1264 struct list_head *p;
1266 /* We don't have to lock device here. Connections are always
1267 * added and removed with TX task disabled. */
1268 list_for_each(p, &h->list) {
1269 struct hci_conn *c;
1270 c = list_entry(p, struct hci_conn, list);
1272 if (c->type != type || c->state != BT_CONNECTED
1273 || skb_queue_empty(&c->data_q))
1274 continue;
1275 num++;
1277 if (c->sent < min) {
1278 min = c->sent;
1279 conn = c;
1283 if (conn) {
1284 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1285 int q = cnt / num;
1286 *quote = q ? q : 1;
1287 } else
1288 *quote = 0;
1290 BT_DBG("conn %p quote %d", conn, *quote);
1291 return conn;
1294 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1296 struct hci_conn_hash *h = &hdev->conn_hash;
1297 struct list_head *p;
1298 struct hci_conn *c;
1300 BT_ERR("%s ACL tx timeout", hdev->name);
1302 /* Kill stalled connections */
1303 list_for_each(p, &h->list) {
1304 c = list_entry(p, struct hci_conn, list);
1305 if (c->type == ACL_LINK && c->sent) {
1306 BT_ERR("%s killing stalled ACL connection %s",
1307 hdev->name, batostr(&c->dst));
1308 hci_acl_disconn(c, 0x13);
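/* ACL flow control: acl_cnt mirrors the free ACL buffers on the controller
 * and is decremented for every frame sent below; it is replenished from the
 * event handling path when the controller reports completed packets. If no
 * buffer frees up for ~45 seconds, hci_sched_acl() calls hci_acl_tx_to()
 * above to tear down the stalled connections. */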
1313 static inline void hci_sched_acl(struct hci_dev *hdev)
1315 struct hci_conn *conn;
1316 struct sk_buff *skb;
1317 int quote;
1319 BT_DBG("%s", hdev->name);
1321 if (!test_bit(HCI_RAW, &hdev->flags)) {
1322 /* ACL tx timeout must be longer than maximum
1323 * link supervision timeout (40.9 seconds) */
1324 if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1325 hci_acl_tx_to(hdev);
1328 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1329 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1330 BT_DBG("skb %p len %d", skb, skb->len);
1332 hci_conn_enter_active_mode(conn);
1334 hci_send_frame(skb);
1335 hdev->acl_last_tx = jiffies;
1337 hdev->acl_cnt--;
1338 conn->sent++;
1343 /* Schedule SCO */
1344 static inline void hci_sched_sco(struct hci_dev *hdev)
1346 struct hci_conn *conn;
1347 struct sk_buff *skb;
1348 int quote;
1350 BT_DBG("%s", hdev->name);
1352 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1353 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1354 BT_DBG("skb %p len %d", skb, skb->len);
1355 hci_send_frame(skb);
1357 conn->sent++;
1358 if (conn->sent == ~0)
1359 conn->sent = 0;
1364 static inline void hci_sched_esco(struct hci_dev *hdev)
1366 struct hci_conn *conn;
1367 struct sk_buff *skb;
1368 int quote;
1370 BT_DBG("%s", hdev->name);
1372 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1373 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1374 BT_DBG("skb %p len %d", skb, skb->len);
1375 hci_send_frame(skb);
1377 conn->sent++;
1378 if (conn->sent == ~0)
1379 conn->sent = 0;
1384 static void hci_tx_task(unsigned long arg)
1386 struct hci_dev *hdev = (struct hci_dev *) arg;
1387 struct sk_buff *skb;
1389 read_lock(&hci_task_lock);
1391 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1393 /* Schedule queues and send stuff to HCI driver */
1395 hci_sched_acl(hdev);
1397 hci_sched_sco(hdev);
1399 hci_sched_esco(hdev);
1401 /* Send next queued raw (unknown type) packet */
1402 while ((skb = skb_dequeue(&hdev->raw_q)))
1403 hci_send_frame(skb);
1405 read_unlock(&hci_task_lock);
1408 /* ----- HCI RX task (incoming data processing) ----- */
1410 /* ACL data packet */
1411 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1413 struct hci_acl_hdr *hdr = (void *) skb->data;
1414 struct hci_conn *conn;
1415 __u16 handle, flags;
1417 skb_pull(skb, HCI_ACL_HDR_SIZE);
1419 handle = __le16_to_cpu(hdr->handle);
1420 flags = hci_flags(handle);
1421 handle = hci_handle(handle);
1423 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1425 hdev->stat.acl_rx++;
1427 hci_dev_lock(hdev);
1428 conn = hci_conn_hash_lookup_handle(hdev, handle);
1429 hci_dev_unlock(hdev);
1431 if (conn) {
1432 register struct hci_proto *hp;
1434 hci_conn_enter_active_mode(conn);
1436 /* Send to upper protocol */
1437 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1438 hp->recv_acldata(conn, skb, flags);
1439 return;
1441 } else {
1442 BT_ERR("%s ACL packet for unknown connection handle %d",
1443 hdev->name, handle);
1446 kfree_skb(skb);
1449 /* SCO data packet */
1450 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1452 struct hci_sco_hdr *hdr = (void *) skb->data;
1453 struct hci_conn *conn;
1454 __u16 handle;
1456 skb_pull(skb, HCI_SCO_HDR_SIZE);
1458 handle = __le16_to_cpu(hdr->handle);
1460 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1462 hdev->stat.sco_rx++;
1464 hci_dev_lock(hdev);
1465 conn = hci_conn_hash_lookup_handle(hdev, handle);
1466 hci_dev_unlock(hdev);
1468 if (conn) {
1469 register struct hci_proto *hp;
1471 /* Send to upper protocol */
1472 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1473 hp->recv_scodata(conn, skb);
1474 return;
1476 } else {
1477 BT_ERR("%s SCO packet for unknown connection handle %d",
1478 hdev->name, handle);
1481 kfree_skb(skb);
1484 static void hci_rx_task(unsigned long arg)
1486 struct hci_dev *hdev = (struct hci_dev *) arg;
1487 struct sk_buff *skb;
1489 BT_DBG("%s", hdev->name);
1491 read_lock(&hci_task_lock);
1493 while ((skb = skb_dequeue(&hdev->rx_q))) {
1494 if (atomic_read(&hdev->promisc)) {
1495 /* Send copy to the sockets */
1496 hci_send_to_sock(hdev, skb);
1499 if (test_bit(HCI_RAW, &hdev->flags)) {
1500 kfree_skb(skb);
1501 continue;
1504 if (test_bit(HCI_INIT, &hdev->flags)) {
1505 			/* Don't process data packets in this state. */
1506 switch (bt_cb(skb)->pkt_type) {
1507 case HCI_ACLDATA_PKT:
1508 case HCI_SCODATA_PKT:
1509 kfree_skb(skb);
1510 continue;
1514 /* Process frame */
1515 switch (bt_cb(skb)->pkt_type) {
1516 case HCI_EVENT_PKT:
1517 hci_event_packet(hdev, skb);
1518 break;
1520 case HCI_ACLDATA_PKT:
1521 BT_DBG("%s ACL data packet", hdev->name);
1522 hci_acldata_packet(hdev, skb);
1523 break;
1525 case HCI_SCODATA_PKT:
1526 BT_DBG("%s SCO data packet", hdev->name);
1527 hci_scodata_packet(hdev, skb);
1528 break;
1530 default:
1531 kfree_skb(skb);
1532 break;
1536 read_unlock(&hci_task_lock);
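/* Command flow control: cmd_cnt is the number of commands the controller
 * will currently accept. A command is dequeued only while cmd_cnt > 0; a
 * clone is kept in sent_cmd so hci_sent_cmd_data() can inspect it later,
 * and a controller that stays silent for a second has cmd_cnt forced back
 * to 1 so queued commands are not stuck forever. */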
1539 static void hci_cmd_task(unsigned long arg)
1541 struct hci_dev *hdev = (struct hci_dev *) arg;
1542 struct sk_buff *skb;
1544 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1546 if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1547 BT_ERR("%s command tx timeout", hdev->name);
1548 atomic_set(&hdev->cmd_cnt, 1);
1551 /* Send queued commands */
1552 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1553 if (hdev->sent_cmd)
1554 kfree_skb(hdev->sent_cmd);
1556 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1557 atomic_dec(&hdev->cmd_cnt);
1558 hci_send_frame(skb);
1559 hdev->cmd_last_tx = jiffies;
1560 } else {
1561 skb_queue_head(&hdev->cmd_q, skb);
1562 hci_sched_cmd(hdev);