/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

bool enable_hs;
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
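/*
 * Illustrative sketch (added for exposition, not part of the original
 * sources; the callback and block names are hypothetical): a caller
 * interested in device state changes registers an atomic notifier block
 * and receives HCI_DEV_UP/HCI_DEV_DOWN style events with the hci_dev as
 * the payload.
 *
 *	static int example_hci_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_UP)
 *			BT_DBG("%s is up", hdev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_hci_event,
 *	};
 *
 *	hci_register_notifier(&example_nb);
 */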
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
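/*
 * Illustrative sketch (for exposition; the request callback name is
 * hypothetical): every synchronous HCI operation is expressed as a
 * "request" callback that queues commands, and hci_request() blocks until
 * hci_req_complete() wakes the caller or the timeout fires. Enabling page
 * scan could look like:
 *
 *	static void example_scan_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_request(hdev, example_scan_req, SCAN_PAGE,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */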
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
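/*
 * Illustrative sketch (hypothetical, for exposition): every successful
 * hci_dev_get() must be balanced by hci_dev_put() once the caller is done
 * with the reference.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */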
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
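/*
 * Illustrative user-space sketch (hypothetical, for exposition): the
 * function above backs the HCIINQUIRY ioctl on a raw HCI socket. The
 * caller passes a struct hci_inquiry_req followed by room for the
 * returned inquiry_info entries in a single buffer.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */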
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}
int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
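/*
 * Worked example (added for exposition): with general bonding required on
 * both sides (auth_type 0x04, remote_auth 0x04) and a fresh unauthenticated
 * combination key, both values are > 0x01, so the key is kept persistently.
 * Conversely, if neither side required bonding (auth_type 0x00, remote_auth
 * 0x00), none of the rules above match and the key is treated as
 * non-persistent and dropped after use.
 */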
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}
static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
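/*
 * Illustrative driver-side sketch (hypothetical; the example_* callbacks
 * are placeholders, not functions from this file): a transport driver
 * allocates a device, fills in the mandatory callbacks checked above and
 * the send hook used by hci_send_frame(), then registers it.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = example_open;
 *	hdev->close    = example_close;
 *	hdev->send     = example_send;
 *	hdev->destruct = example_destruct;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */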
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
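/*
 * Illustrative sketch (hypothetical, for exposition): a UART-style driver
 * that receives a typed packet in arbitrary chunks can feed each chunk to
 * hci_recv_fragment(); the core reassembles per packet type and delivers
 * complete frames through hci_recv_frame().
 *
 *	static void example_rx_chunk(struct hci_dev *hdev,
 *				     u8 pkt_type, void *buf, int len)
 *	{
 *		if (hci_recv_fragment(hdev, pkt_type, buf, len) < 0)
 *			BT_ERR("%s reassembly failed", hdev->name);
 *	}
 */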
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
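/*
 * Illustrative sketch (for exposition): callers pass the opcode plus an
 * optional parameter block; the command is queued on cmd_q and actually
 * transmitted from hci_cmd_work() once the controller has a free command
 * credit. E.g. writing the local name:
 *
 *	struct hci_cp_write_local_name cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(cp.name, "example", 7);
 *	hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 */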
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
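/*
 * For reference (added note): the 16-bit ACL header field packs the 12-bit
 * connection handle with the 2-bit packet boundary and 2-bit broadcast
 * flags, i.e. hci_handle_pack(handle, flags) == (handle | (flags << 12)),
 * and hci_handle()/hci_flags() on the RX path undo exactly this packing.
 */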
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
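/*
 * Scheduling note (worked example, added for exposition): the quota is the
 * controller's free buffer count divided by the number of ready
 * connections, so with hdev->sco_cnt == 8 and two SCO connections with
 * queued data, each call hands the least-recently-served connection a
 * quote of 8 / 2 = 4 frames.
 */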
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");