/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static struct notifier_block *hci_notifier;
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&hci_notifier, nb);
}

void hci_notify(struct hci_dev *hdev, int event)
{
	notifier_call_chain(&hci_notifier, event, hdev);
}
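/*
 * Usage sketch (illustrative, not part of the original file): a module that
 * wants to hear about device state changes registers a notifier_block and
 * receives the HCI_DEV_* events raised through hci_notify() above.  The
 * callback and variable names below are hypothetical.
 *
 *	static int my_hci_event(struct notifier_block *nb, unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_UP)
 *			BT_DBG("%s came up", hdev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = { .notifier_call = my_hci_event };
 *
 *	hci_register_notifier(&my_notifier);	// module init
 *	hci_unregister_notifier(&my_notifier);	// module exit
 */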
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;
	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;
	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__u16 param;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		skb->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = __cpu_to_le16(0xffff);
		cp.sco_max_pkt = __cpu_to_le16(0xffff);
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	{
		struct hci_cp_set_event_flt cp;
		cp.flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
	}

	/* Page timeout ~20 secs */
	param = __cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = __cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
EXPORT_SYMBOL(hci_dev_get);
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;

	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		memset(e, 0, sizeof(struct inquiry_entry));
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
			inquiry_cache_empty(hdev) ||
			ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * 2 * HZ;
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HZ/4);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);
	return err;
}
/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	memset(hdev, 0, sizeof(struct hci_dev));

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via class release */
	class_device_put(&hdev->class_dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
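/*
 * Usage sketch (illustrative, not part of the original file): a transport
 * driver allocates a device, fills in the callbacks that hci_register_dev()
 * checks for above (open, close, destruct; send is what hci_send_frame()
 * calls), and registers it.  The callback names and error handling here are
 * hypothetical.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->type     = HCI_USB;		// bus type of the transport
 *	hdev->owner    = THIS_MODULE;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -EBUSY;
 *	}
 */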
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	hci_dev_put(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
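/*
 * Usage sketch (illustrative): an upper layer describes itself with a
 * struct hci_proto and claims one of the HCI_MAX_PROTO slots; the RX task
 * below then delivers ACL/SCO data through the recv_* hooks.  The structure
 * and callback names here are hypothetical stand-ins for what the real
 * L2CAP and SCO layers do with ids HCI_PROTO_L2CAP and HCI_PROTO_SCO.
 *
 *	static struct hci_proto my_proto = {
 *		.name         = "MYPROTO",
 *		.id           = HCI_PROTO_L2CAP,
 *		.recv_acldata = my_recv_acldata,
 *	};
 *
 *	err = hci_register_proto(&my_proto);
 *	...
 *	hci_unregister_proto(&my_proto);
 */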
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		do_gettimeofday(&skb->stamp);

		/* Send copy to monitor */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	skb->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
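/*
 * Usage sketch (illustrative): event handling code can use
 * hci_sent_cmd_data() to recover the parameters of the command that a
 * Command Complete/Status event refers to, e.g. for an Inquiry command:
 *
 *	struct hci_cp_inquiry *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_INQUIRY);
 *	if (cp)
 *		BT_DBG("inquiry length %d num_rsp %d", cp->length, cp->num_rsp);
 */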
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = __cpu_to_le16(len);

	skb->h.raw = (void *) hdr;
}
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			skb->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = __cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (skb->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (skb->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}
);