   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
/* Bluetooth HCI core. */

#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
	return atomic_notifier_chain_register(&hci_notifier, nb);

int hci_unregister_notifier(struct notifier_block *nb)
	return atomic_notifier_chain_unregister(&hci_notifier, nb);

static void hci_notify(struct hci_dev *hdev, int event)
	atomic_notifier_call_chain(&hci_notifier, event, hdev);

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);

static void hci_req_cancel(struct hci_dev *hdev, int err)
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
	DECLARE_WAITQUEUE(wait, current);

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))

	switch (hdev->req_status) {
		err = -bt_err(hdev->req_result);

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
	/* Serialize all requests */
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);
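/*
 * Note on the request machinery above: a request callback such as hci_init_req
 * only queues HCI commands.  __hci_request() marks the request HCI_REQ_PEND and
 * sleeps on req_wait_q until hci_req_complete() or hci_req_cancel() switches the
 * status to HCI_REQ_DONE/HCI_REQ_CANCELED and wakes it up, the controller status
 * being mapped to an errno via bt_err().  hci_request() is the serialized
 * wrapper around it; the hci_req_unlock() above releases hdev->req_lock, which
 * is presumably taken by a matching hci_req_lock() call elided from this excerpt.
 */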
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %ld", hdev->name, opt);

	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);

	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	struct hci_cp_host_buffer_size cp;
	cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
	cp.sco_mtu = HCI_MAX_SCO_SIZE;
	cp.acl_max_pkt = cpu_to_le16(0xffff);
	cp.sco_max_pkt = cpu_to_le16(0xffff);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
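/*
 * The page timeout and connection accept timeout written above are expressed
 * in baseband slots of 0.625 ms: 0x8000 = 32768 slots = 20.48 s and
 * 0x7d00 = 32000 slots = 20 s, which matches the "~20 secs" comments.
 */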
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %x", hdev->name, auth);

	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %x", hdev->name, encrypt);

	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
	struct hci_dev *hdev = NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);

	read_unlock(&hci_dev_list_lock);
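/*
 * Note: hci_dev_get() returns the device with its reference count raised via
 * hci_dev_hold(); callers are expected to drop that reference with
 * hci_dev_put() once they are done with the device.
 */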
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
		e->next = cache->list;

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

	BT_DBG("cache %p, copied %d", cache, copied);
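/*
 * The inquiry cache above is a simple singly linked list of inquiry_entry
 * structures keyed by bdaddr: lookups walk the list with bacmp(), updates
 * either refresh an existing entry or prepend a new one, and
 * inquiry_cache_dump() copies at most 'num' entries into a flat array of
 * struct inquiry_info for hci_inquiry() below.
 */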
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))

	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);

int hci_inquiry(void __user *arg)
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;

	if (copy_from_user(&ir, ptr, sizeof(ir)))

	if (!(hdev = hci_dev_get(ir.dev_id)))

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);

	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space. */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
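/*
 * hci_inquiry() flow: the cached results are flushed (and a fresh HCI Inquiry
 * request is issued through hci_request()) when the cache is older than
 * INQUIRY_CACHE_AGE_MAX, empty, or when the caller passes IREQ_CACHE_FLUSH;
 * the request timeout is ir.length * 2000 ms.  Because inquiry_cache_dump()
 * runs under the BH lock and cannot sleep, the results are staged in a
 * kmalloc'ed buffer (up to 255 entries when num_rsp is 0) before being copied
 * out to user space.
 */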
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
	struct hci_dev *hdev;

	if (!(hdev = hci_dev_get(dev)))

	BT_DBG("%s %p", hdev->name, hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);

		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);

		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;

	hci_req_unlock(hdev);
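/*
 * hci_dev_open() sequence: unless the device is marked HCI_RAW (for instance
 * via HCI_QUIRK_RAW_DEVICE), the HCI_INIT flag is held while hci_init_req runs
 * as a synchronous request bounded by HCI_INIT_TIMEOUT; on success HCI_UP is
 * set and HCI_DEV_UP is broadcast through the notifier chain, otherwise the
 * tasklets are killed, the queues purged and any pending sent_cmd dropped.
 */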
static int hci_dev_do_close(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);

	tasklet_kill(&hdev->cmd_task);

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */

	hci_req_unlock(hdev);
int hci_dev_close(__u16 dev)
	struct hci_dev *hdev;

	if (!(hdev = hci_dev_get(dev)))

	err = hci_dev_do_close(hdev);

int hci_dev_reset(__u16 dev)
	struct hci_dev *hdev;

	if (!(hdev = hci_dev_get(dev)))

	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);

int hci_dev_reset_stat(__u16 dev)
	struct hci_dev *hdev;

	if (!(hdev = hci_dev_get(dev)))

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
int hci_dev_cmd(unsigned int cmd, void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_req dr;

	if (copy_from_user(&dr, arg, sizeof(dr)))

	if (!(hdev = hci_dev_get(dr.dev_id)))

		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (!lmp_encrypt_capable(hdev)) {

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
						msecs_to_jiffies(HCI_INIT_TIMEOUT));

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		hdev->pkt_type = (__u16) dr.dev_opt;

		hdev->link_policy = (__u16) dr.dev_opt;

		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);

		hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);

		hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
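/*
 * For the MTU-related commands above, dev_opt carries two 16-bit values: the
 * code reads *((__u16 *)&dr.dev_opt + 0) as the packet count and + 1 as the
 * MTU, i.e. on a little-endian host the low half of dev_opt is the packet
 * count and the high half is the MTU.
 */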
int hci_get_dev_list(void __user *arg)
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;

	if (get_user(dev_num, (__u16 __user *) arg))

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

	read_unlock_bh(&hci_dev_list_lock);

	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);

	return err ? -EFAULT : 0;
int hci_get_dev_info(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_info di;

	if (copy_from_user(&di, arg, sizeof(di)))

	if (!(hdev = hci_dev_get(di.dev_id)))

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);

	skb_queue_head_init(&hdev->driver_init);

EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);

EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
	struct list_head *head = &hci_dev_list, *p;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)

	sprintf(hdev->name, "hci%d", id);
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

EXPORT_SYMBOL(hci_register_dev);
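/*
 * Registration defaults set above: the first unused "hci%d" id is chosen, the
 * packet type mask starts as DM1/DH1/HV1 only, and the sniff interval bounds
 * are in 0.625 ms slots (800 slots = 500 ms max, 80 slots = 50 ms min).
 */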
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
	hci_notify(hdev, HCI_DEV_SUSPEND);

EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
	hci_notify(hdev, HCI_DEV_RESUME);

EXPORT_SYMBOL(hci_resume_dev);
/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)

	struct sk_buff *skb = __reassembly(hdev, type);
	struct { int expect; } *scb;

		/* Start of the frame */

			if (count >= HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = data;
				len = HCI_EVENT_HDR_SIZE + h->plen;

		case HCI_ACLDATA_PKT:
			if (count >= HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = data;
				len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);

		case HCI_SCODATA_PKT:
			if (count >= HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = data;
				len = HCI_SCO_HDR_SIZE + h->dlen;

		skb = bt_skb_alloc(len, GFP_ATOMIC);
			BT_ERR("%s no memory for packet", hdev->name);

		skb->dev = (void *) hdev;
		bt_cb(skb)->pkt_type = type;

		__reassembly(hdev, type) = skb;

		scb = (void *) skb->cb;

	scb = (void *) skb->cb;

	len = min(len, count);

	memcpy(skb_put(skb, len), data, len);

	if (scb->expect == 0) {
		/* Complete frame */

		__reassembly(hdev, type) = NULL;

		bt_cb(skb)->pkt_type = type;
		hci_recv_frame(skb);

	count -= len; data += len;

EXPORT_SYMBOL(hci_recv_fragment);
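/*
 * Reassembly sketch: __reassembly() keeps one partially received skb per
 * packet type in hdev->reassembly[].  When no skb is pending, the event/ACL/
 * SCO header is parsed to learn the full frame length and a new skb is
 * allocated; each fragment is then appended with skb_put(), and once the
 * remaining byte count tracked in scb->expect reaches zero the completed
 * frame is handed to hci_recv_frame().  (The lines that update scb->expect
 * are not part of this excerpt.)
 */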
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;

	write_unlock_bh(&hci_task_lock);

EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;

	write_unlock_bh(&hci_task_lock);

EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);

	/* Get rid of skb owner, prior to sending to the driver. */

	return hdev->send(skb);
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
		BT_ERR("%s no memory for command", hdev->name);

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf));

		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);
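/*
 * Commands queued here are transmitted by hci_cmd_task() below, which only
 * dequeues while hdev->cmd_cnt is non-zero (the credit the controller grants
 * for outstanding commands) and keeps a clone of the in-flight command in
 * hdev->sent_cmd so that hci_sent_cmd_data() can retrieve its parameters when
 * the completion event arrives.
 */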
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf)))

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
	struct hci_acl_hdr *hdr;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);

		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);

		spin_unlock_bh(&conn->data_q.lock);

EXPORT_SYMBOL(hci_send_acl);
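/*
 * ACL fragmentation: the head skb is tagged ACL_START, every skb hanging off
 * its frag_list is re-tagged ACL_CONT with its own ACL header, and the whole
 * chain is appended to conn->data_q under data_q.lock so the TX scheduler
 * never sees a partially queued frame.
 */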
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);

EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))

		if (c->sent < min) {

	int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);

	BT_DBG("conn %p quote %d", conn, *quote);

static inline void hci_acl_tx_to(struct hci_dev *hdev)
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);

static inline void hci_sched_acl(struct hci_dev *hdev)
	struct hci_conn *conn;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

static inline void hci_sched_sco(struct hci_dev *hdev)
	struct hci_conn *conn;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

		if (conn->sent == ~0)

static void hci_tx_task(unsigned long arg)
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
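/*
 * TX scheduling summary: hci_low_sent() picks, among connections of the given
 * type with queued data, the one with the fewest packets already in flight
 * (c->sent), and derives a per-round quote from the controller buffer count
 * (hdev->acl_cnt or hdev->sco_cnt).  hci_sched_acl() additionally kills
 * connections stalled for more than 45 s (HZ * 45), which is longer than the
 * maximum link supervision timeout of 40.9 seconds mentioned above.
 */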
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);

		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);

		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);

static void hci_rx_task(unsigned long arg)
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);

		if (test_bit(HCI_RAW, &hdev->flags)) {

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:

		switch (bt_cb(skb)->pkt_type) {
			hci_event_packet(hdev, skb);

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);

	read_unlock(&hci_task_lock);
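/*
 * RX dispatch: every packet taken off rx_q is first mirrored to promiscuous
 * sockets via hci_send_to_sock(); packets are not processed further while
 * HCI_RAW is set, data packets are skipped during HCI_INIT, and the remaining
 * packets are routed by pkt_type to hci_event_packet(), hci_acldata_packet()
 * or hci_scodata_packet(), the latter two handing the payload to the
 * registered L2CAP or SCO protocol handlers.
 */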
static void hci_cmd_task(unsigned long arg)
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;

			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev
);