BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2011 ProFUSION Embedded Systems

Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "hci_codec.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
	struct hci_dev *hdev = NULL, *d;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		hdev = hci_dev_hold(d);
	read_unlock(&hci_dev_list_lock);
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
void hci_discovery_set_state(struct hci_dev *hdev, int state)
	int old_state = hdev->discovery.state;

	if (old_state == state)

	hdev->discovery.state = state;

	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
	case DISCOVERY_STARTING:
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
	case DISCOVERY_RESOLVING:
	case DISCOVERY_STOPPING:

	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
void hci_inquiry_cache_flush(struct hci_dev *hdev)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
		if (!bacmp(&e->data.bdaddr, bdaddr))
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))

	list_add(&ie->list, pos);
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

	list_add(&ie->all, &cache->all);

		ie->name_state = NAME_KNOWN;
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);

	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

	BT_DBG("cache %p, copied %d", cache, copied);
int hci_inquiry(void __user *arg)
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;

	if (copy_from_user(&ir, ptr, sizeof(ir)))

	hdev = hci_dev_get(ir.dev_id);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {

	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
	hci_dev_unlock(hdev);

		hci_req_sync_lock(hdev);
		err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
		hci_req_sync_unlock(hdev);

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {

	/* for unlimited number of responses we will use buffer with
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);

	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
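/* A minimal userspace sketch of driving this ioctl (illustration only, not
 * part of this file; HCIINQUIRY and struct hci_inquiry_req as exposed by the
 * BlueZ headers are assumed): the buffer passed in starts with the request
 * and must leave room for num_rsp inquiry_info entries right behind it,
 * which is where the copy_to_user() calls above place the results.
 *
 *	uint8_t buf[sizeof(struct hci_inquiry_req) +
 *		    255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *)buf;
 *
 *	memset(buf, 0, sizeof(buf));
 *	ir->dev_id  = 0;				// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;				// GIAC 0x9e8b33
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;				// 8 * 1.28 s
 *	ir->num_rsp = 255;
 *
 *	if (ioctl(dd, HCIINQUIRY, buf) < 0)		// dd: raw HCI socket fd
 *		perror("HCIINQUIRY");
 */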
static int hci_dev_do_open(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);
	ret = hci_dev_open_sync(hdev);
	hci_req_sync_unlock(hdev);
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);
int hci_dev_do_close(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);
	err = hci_dev_close_sync(hdev);
	hci_req_sync_unlock(hdev);

int hci_dev_close(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);
static int hci_dev_do_reset(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *	queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 * inside RCU section to see the flag or complete scheduling.

	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	drain_workqueue(hdev->workqueue);

	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
int hci_dev_reset(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	if (!test_bit(HCI_UP, &hdev->flags)) {

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	err = hci_dev_do_reset(hdev);

int hci_dev_reset_stat(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
		conn_changed = hci_dev_test_and_clear_flag(hdev,

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,

	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
int hci_dev_cmd(unsigned int cmd, void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_req dr;

	if (copy_from_user(&dr, arg, sizeof(dr)))

	hdev = hci_dev_get(dr.dev_id);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);

		if (!lmp_encrypt_capable(hdev)) {

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_cmd_sync_status(hdev,
						  HCI_OP_WRITE_AUTH_ENABLE,

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		hci_update_passive_scan_state(hdev, dr.dev_opt);

		policy = cpu_to_le16(dr.dev_opt);

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					  2, &policy, HCI_CMD_TIMEOUT);

		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);

		if (hdev->pkt_type == (__u16) dr.dev_opt)

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);

		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);

		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
int hci_get_dev_list(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;

	if (get_user(dev_num, (__u16 __user *) arg))

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))

	dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);

	dl->dev_num = dev_num;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		dr[n].dev_id  = hdev->id;
		dr[n].dev_opt = flags;

	read_unlock(&hci_dev_list_lock);

	err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));

	return err ? -EFAULT : 0;
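/* A minimal userspace sketch of the matching HCIGETDEVLIST call (illustration
 * only; HCIGETDEVLIST and struct hci_dev_list_req from the BlueZ headers are
 * assumed): dev_num is read first via get_user() above, so the caller sets it
 * to the capacity of the trailing dev_req[] array before issuing the ioctl.
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)		// ctl: raw HCI socket fd
 *		for (int i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       dl->dev_req[i].dev_id, dl->dev_req[i].dev_opt);
 *	free(dl);
 */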
int hci_get_dev_info(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_info di;

	if (copy_from_user(&di, arg, sizeof(di)))

	hdev = hci_dev_get(di.dev_id);

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);

	strscpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f);
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;

	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
/* ---- Interface to HCI drivers ---- */

static int hci_dev_do_poweroff(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_set_powered_sync(hdev, false);

	hci_req_sync_unlock(hdev);
static int hci_rfkill_set_block(void *data, bool blocked)
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))

	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))

		hci_dev_set_flag(hdev, HCI_RFKILLED);

		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = hci_dev_do_poweroff(hdev);
				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",

				/* Make sure the device is still closed even if
				 * anything during power off sequence (eg.
				 * disconnecting devices) failed.
				hci_dev_do_close(hdev);

		hci_dev_clear_flag(hdev, HCI_RFKILLED);

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
static void hci_power_on(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);

	err = hci_dev_do_open(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		mgmt_index_added(hdev);
static void hci_power_off(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);

static void hci_error_reset(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

		hdev->hw_error(hdev, hdev->hw_error_code);
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);
1022 void hci_uuids_clear(struct hci_dev
*hdev
)
1024 struct bt_uuid
*uuid
, *tmp
;
1026 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
1027 list_del(&uuid
->list
);
1032 void hci_link_keys_clear(struct hci_dev
*hdev
)
1034 struct link_key
*key
, *tmp
;
1036 list_for_each_entry_safe(key
, tmp
, &hdev
->link_keys
, list
) {
1037 list_del_rcu(&key
->list
);
1038 kfree_rcu(key
, rcu
);
1042 void hci_smp_ltks_clear(struct hci_dev
*hdev
)
1044 struct smp_ltk
*k
, *tmp
;
1046 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1047 list_del_rcu(&k
->list
);
1052 void hci_smp_irks_clear(struct hci_dev
*hdev
)
1054 struct smp_irk
*k
, *tmp
;
1056 list_for_each_entry_safe(k
, tmp
, &hdev
->identity_resolving_keys
, list
) {
1057 list_del_rcu(&k
->list
);
1062 void hci_blocked_keys_clear(struct hci_dev
*hdev
)
1064 struct blocked_key
*b
, *tmp
;
1066 list_for_each_entry_safe(b
, tmp
, &hdev
->blocked_keys
, list
) {
1067 list_del_rcu(&b
->list
);
1072 bool hci_is_blocked_key(struct hci_dev
*hdev
, u8 type
, u8 val
[16])
1074 bool blocked
= false;
1075 struct blocked_key
*b
;
1078 list_for_each_entry_rcu(b
, &hdev
->blocked_keys
, list
) {
1079 if (b
->type
== type
&& !memcmp(b
->val
, val
, sizeof(b
->val
))) {
1089 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1094 list_for_each_entry_rcu(k
, &hdev
->link_keys
, list
) {
1095 if (bacmp(bdaddr
, &k
->bdaddr
) == 0) {
1098 if (hci_is_blocked_key(hdev
,
1099 HCI_BLOCKED_KEY_TYPE_LINKKEY
,
1101 bt_dev_warn_ratelimited(hdev
,
1102 "Link key blocked for %pMR",
1115 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1116 u8 key_type
, u8 old_key_type
)
1119 if (key_type
< 0x03)
1122 /* Debug keys are insecure so don't store them persistently */
1123 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1126 /* Changed combination key and there's no previous one */
1127 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1130 /* Security mode 3 case */
1134 /* BR/EDR key derived using SC from an LE link */
1135 if (conn
->type
== LE_LINK
)
1138 /* Neither local nor remote side had no-bonding as requirement */
1139 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1142 /* Local side had dedicated bonding as requirement */
1143 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1146 /* Remote side had dedicated bonding as requirement */
1147 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1150 /* If none of the above criteria match, then don't store the key
1155 static u8
ltk_role(u8 type
)
1157 if (type
== SMP_LTK
)
1158 return HCI_ROLE_MASTER
;
1160 return HCI_ROLE_SLAVE
;
1163 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1164 u8 addr_type
, u8 role
)
1169 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
1170 if (addr_type
!= k
->bdaddr_type
|| bacmp(bdaddr
, &k
->bdaddr
))
1173 if (smp_ltk_is_sc(k
) || ltk_role(k
->type
) == role
) {
1176 if (hci_is_blocked_key(hdev
, HCI_BLOCKED_KEY_TYPE_LTK
,
1178 bt_dev_warn_ratelimited(hdev
,
1179 "LTK blocked for %pMR",
1192 struct smp_irk
*hci_find_irk_by_rpa(struct hci_dev
*hdev
, bdaddr_t
*rpa
)
1194 struct smp_irk
*irk_to_return
= NULL
;
1195 struct smp_irk
*irk
;
1198 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
1199 if (!bacmp(&irk
->rpa
, rpa
)) {
1200 irk_to_return
= irk
;
1205 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
1206 if (smp_irk_matches(hdev
, irk
->val
, rpa
)) {
1207 bacpy(&irk
->rpa
, rpa
);
1208 irk_to_return
= irk
;
1214 if (irk_to_return
&& hci_is_blocked_key(hdev
, HCI_BLOCKED_KEY_TYPE_IRK
,
1215 irk_to_return
->val
)) {
1216 bt_dev_warn_ratelimited(hdev
, "Identity key blocked for %pMR",
1217 &irk_to_return
->bdaddr
);
1218 irk_to_return
= NULL
;
1223 return irk_to_return
;
1226 struct smp_irk
*hci_find_irk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1229 struct smp_irk
*irk_to_return
= NULL
;
1230 struct smp_irk
*irk
;
1232 /* Identity Address must be public or static random */
1233 if (addr_type
== ADDR_LE_DEV_RANDOM
&& (bdaddr
->b
[5] & 0xc0) != 0xc0)
1237 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
1238 if (addr_type
== irk
->addr_type
&&
1239 bacmp(bdaddr
, &irk
->bdaddr
) == 0) {
1240 irk_to_return
= irk
;
1247 if (irk_to_return
&& hci_is_blocked_key(hdev
, HCI_BLOCKED_KEY_TYPE_IRK
,
1248 irk_to_return
->val
)) {
1249 bt_dev_warn_ratelimited(hdev
, "Identity key blocked for %pMR",
1250 &irk_to_return
->bdaddr
);
1251 irk_to_return
= NULL
;
1256 return irk_to_return
;
1259 struct link_key
*hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1260 bdaddr_t
*bdaddr
, u8
*val
, u8 type
,
1261 u8 pin_len
, bool *persistent
)
1263 struct link_key
*key
, *old_key
;
1266 old_key
= hci_find_link_key(hdev
, bdaddr
);
1268 old_key_type
= old_key
->type
;
1271 old_key_type
= conn
? conn
->key_type
: 0xff;
1272 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
1275 list_add_rcu(&key
->list
, &hdev
->link_keys
);
1278 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
1280 /* Some buggy controller combinations generate a changed
1281 * combination key for legacy pairing even when there's no
1283 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1284 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
1285 type
= HCI_LK_COMBINATION
;
1287 conn
->key_type
= type
;
1290 bacpy(&key
->bdaddr
, bdaddr
);
1291 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
1292 key
->pin_len
= pin_len
;
1294 if (type
== HCI_LK_CHANGED_COMBINATION
)
1295 key
->type
= old_key_type
;
1300 *persistent
= hci_persistent_key(hdev
, conn
, type
,
1306 struct smp_ltk
*hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1307 u8 addr_type
, u8 type
, u8 authenticated
,
1308 u8 tk
[16], u8 enc_size
, __le16 ediv
, __le64 rand
)
1310 struct smp_ltk
*key
, *old_key
;
1311 u8 role
= ltk_role(type
);
1313 old_key
= hci_find_ltk(hdev
, bdaddr
, addr_type
, role
);
1317 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
1320 list_add_rcu(&key
->list
, &hdev
->long_term_keys
);
1323 bacpy(&key
->bdaddr
, bdaddr
);
1324 key
->bdaddr_type
= addr_type
;
1325 memcpy(key
->val
, tk
, sizeof(key
->val
));
1326 key
->authenticated
= authenticated
;
1329 key
->enc_size
= enc_size
;
1335 struct smp_irk
*hci_add_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1336 u8 addr_type
, u8 val
[16], bdaddr_t
*rpa
)
1338 struct smp_irk
*irk
;
1340 irk
= hci_find_irk_by_addr(hdev
, bdaddr
, addr_type
);
1342 irk
= kzalloc(sizeof(*irk
), GFP_KERNEL
);
1346 bacpy(&irk
->bdaddr
, bdaddr
);
1347 irk
->addr_type
= addr_type
;
1349 list_add_rcu(&irk
->list
, &hdev
->identity_resolving_keys
);
1352 memcpy(irk
->val
, val
, 16);
1353 bacpy(&irk
->rpa
, rpa
);
1358 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1360 struct link_key
*key
;
1362 key
= hci_find_link_key(hdev
, bdaddr
);
1366 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1368 list_del_rcu(&key
->list
);
1369 kfree_rcu(key
, rcu
);
1374 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 bdaddr_type
)
1376 struct smp_ltk
*k
, *tmp
;
1379 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1380 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->bdaddr_type
!= bdaddr_type
)
1383 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1385 list_del_rcu(&k
->list
);
1390 return removed
? 0 : -ENOENT
;
1393 void hci_remove_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
)
1395 struct smp_irk
*k
, *tmp
;
1397 list_for_each_entry_safe(k
, tmp
, &hdev
->identity_resolving_keys
, list
) {
1398 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->addr_type
!= addr_type
)
1401 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1403 list_del_rcu(&k
->list
);
1408 bool hci_bdaddr_is_paired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1411 struct smp_irk
*irk
;
1414 if (type
== BDADDR_BREDR
) {
1415 if (hci_find_link_key(hdev
, bdaddr
))
1420 /* Convert to HCI addr type which struct smp_ltk uses */
1421 if (type
== BDADDR_LE_PUBLIC
)
1422 addr_type
= ADDR_LE_DEV_PUBLIC
;
1424 addr_type
= ADDR_LE_DEV_RANDOM
;
1426 irk
= hci_get_irk(hdev
, bdaddr
, addr_type
);
1428 bdaddr
= &irk
->bdaddr
;
1429 addr_type
= irk
->addr_type
;
1433 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
1434 if (k
->bdaddr_type
== addr_type
&& !bacmp(bdaddr
, &k
->bdaddr
)) {
1444 /* HCI command timer function */
1445 static void hci_cmd_timeout(struct work_struct
*work
)
1447 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1450 if (hdev
->req_skb
) {
1451 u16 opcode
= hci_skb_opcode(hdev
->req_skb
);
1453 bt_dev_err(hdev
, "command 0x%4.4x tx timeout", opcode
);
1455 hci_cmd_sync_cancel_sync(hdev
, ETIMEDOUT
);
1457 bt_dev_err(hdev
, "command tx timeout");
1460 if (hdev
->cmd_timeout
)
1461 hdev
->cmd_timeout(hdev
);
1463 atomic_set(&hdev
->cmd_cnt
, 1);
1464 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1467 /* HCI ncmd timer function */
1468 static void hci_ncmd_timeout(struct work_struct
*work
)
1470 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1473 bt_dev_err(hdev
, "Controller not accepting commands anymore: ncmd = 0");
1475 /* During HCI_INIT phase no events can be injected if the ncmd timer
1476 * triggers since the procedure has its own timeout handling.
1478 if (test_bit(HCI_INIT
, &hdev
->flags
))
1481 /* This is an irrecoverable state, inject hardware error event */
1482 hci_reset_dev(hdev
);
1485 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
1486 bdaddr_t
*bdaddr
, u8 bdaddr_type
)
1488 struct oob_data
*data
;
1490 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
) {
1491 if (bacmp(bdaddr
, &data
->bdaddr
) != 0)
1493 if (data
->bdaddr_type
!= bdaddr_type
)
1501 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1504 struct oob_data
*data
;
1506 data
= hci_find_remote_oob_data(hdev
, bdaddr
, bdaddr_type
);
1510 BT_DBG("%s removing %pMR (%u)", hdev
->name
, bdaddr
, bdaddr_type
);
1512 list_del(&data
->list
);
1518 void hci_remote_oob_data_clear(struct hci_dev
*hdev
)
1520 struct oob_data
*data
, *n
;
1522 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1523 list_del(&data
->list
);
1528 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1529 u8 bdaddr_type
, u8
*hash192
, u8
*rand192
,
1530 u8
*hash256
, u8
*rand256
)
1532 struct oob_data
*data
;
1534 data
= hci_find_remote_oob_data(hdev
, bdaddr
, bdaddr_type
);
1536 data
= kmalloc(sizeof(*data
), GFP_KERNEL
);
1540 bacpy(&data
->bdaddr
, bdaddr
);
1541 data
->bdaddr_type
= bdaddr_type
;
1542 list_add(&data
->list
, &hdev
->remote_oob_data
);
1545 if (hash192
&& rand192
) {
1546 memcpy(data
->hash192
, hash192
, sizeof(data
->hash192
));
1547 memcpy(data
->rand192
, rand192
, sizeof(data
->rand192
));
1548 if (hash256
&& rand256
)
1549 data
->present
= 0x03;
1551 memset(data
->hash192
, 0, sizeof(data
->hash192
));
1552 memset(data
->rand192
, 0, sizeof(data
->rand192
));
1553 if (hash256
&& rand256
)
1554 data
->present
= 0x02;
1556 data
->present
= 0x00;
1559 if (hash256
&& rand256
) {
1560 memcpy(data
->hash256
, hash256
, sizeof(data
->hash256
));
1561 memcpy(data
->rand256
, rand256
, sizeof(data
->rand256
));
1563 memset(data
->hash256
, 0, sizeof(data
->hash256
));
1564 memset(data
->rand256
, 0, sizeof(data
->rand256
));
1565 if (hash192
&& rand192
)
1566 data
->present
= 0x01;
1569 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
1574 /* This function requires the caller holds hdev->lock */
1575 struct adv_info
*hci_find_adv_instance(struct hci_dev
*hdev
, u8 instance
)
1577 struct adv_info
*adv_instance
;
1579 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
1580 if (adv_instance
->instance
== instance
)
1581 return adv_instance
;
1587 /* This function requires the caller holds hdev->lock */
1588 struct adv_info
*hci_get_next_instance(struct hci_dev
*hdev
, u8 instance
)
1590 struct adv_info
*cur_instance
;
1592 cur_instance
= hci_find_adv_instance(hdev
, instance
);
1596 if (cur_instance
== list_last_entry(&hdev
->adv_instances
,
1597 struct adv_info
, list
))
1598 return list_first_entry(&hdev
->adv_instances
,
1599 struct adv_info
, list
);
1601 return list_next_entry(cur_instance
, list
);
1604 /* This function requires the caller holds hdev->lock */
1605 int hci_remove_adv_instance(struct hci_dev
*hdev
, u8 instance
)
1607 struct adv_info
*adv_instance
;
1609 adv_instance
= hci_find_adv_instance(hdev
, instance
);
1613 BT_DBG("%s removing %dMR", hdev
->name
, instance
);
1615 if (hdev
->cur_adv_instance
== instance
) {
1616 if (hdev
->adv_instance_timeout
) {
1617 cancel_delayed_work(&hdev
->adv_instance_expire
);
1618 hdev
->adv_instance_timeout
= 0;
1620 hdev
->cur_adv_instance
= 0x00;
1623 cancel_delayed_work_sync(&adv_instance
->rpa_expired_cb
);
1625 list_del(&adv_instance
->list
);
1626 kfree(adv_instance
);
1628 hdev
->adv_instance_cnt
--;
1633 void hci_adv_instances_set_rpa_expired(struct hci_dev
*hdev
, bool rpa_expired
)
1635 struct adv_info
*adv_instance
, *n
;
1637 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
, list
)
1638 adv_instance
->rpa_expired
= rpa_expired
;
1641 /* This function requires the caller holds hdev->lock */
1642 void hci_adv_instances_clear(struct hci_dev
*hdev
)
1644 struct adv_info
*adv_instance
, *n
;
1646 if (hdev
->adv_instance_timeout
) {
1647 disable_delayed_work(&hdev
->adv_instance_expire
);
1648 hdev
->adv_instance_timeout
= 0;
1651 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
, list
) {
1652 disable_delayed_work_sync(&adv_instance
->rpa_expired_cb
);
1653 list_del(&adv_instance
->list
);
1654 kfree(adv_instance
);
1657 hdev
->adv_instance_cnt
= 0;
1658 hdev
->cur_adv_instance
= 0x00;
1661 static void adv_instance_rpa_expired(struct work_struct
*work
)
1663 struct adv_info
*adv_instance
= container_of(work
, struct adv_info
,
1664 rpa_expired_cb
.work
);
1668 adv_instance
->rpa_expired
= true;
1671 /* This function requires the caller holds hdev->lock */
1672 struct adv_info
*hci_add_adv_instance(struct hci_dev
*hdev
, u8 instance
,
1673 u32 flags
, u16 adv_data_len
, u8
*adv_data
,
1674 u16 scan_rsp_len
, u8
*scan_rsp_data
,
1675 u16 timeout
, u16 duration
, s8 tx_power
,
1676 u32 min_interval
, u32 max_interval
,
1679 struct adv_info
*adv
;
1681 adv
= hci_find_adv_instance(hdev
, instance
);
1683 memset(adv
->adv_data
, 0, sizeof(adv
->adv_data
));
1684 memset(adv
->scan_rsp_data
, 0, sizeof(adv
->scan_rsp_data
));
1685 memset(adv
->per_adv_data
, 0, sizeof(adv
->per_adv_data
));
1687 if (hdev
->adv_instance_cnt
>= hdev
->le_num_of_adv_sets
||
1688 instance
< 1 || instance
> hdev
->le_num_of_adv_sets
+ 1)
1689 return ERR_PTR(-EOVERFLOW
);
1691 adv
= kzalloc(sizeof(*adv
), GFP_KERNEL
);
1693 return ERR_PTR(-ENOMEM
);
1695 adv
->pending
= true;
1696 adv
->instance
= instance
;
1698 /* If the controller supports only one set and the instance is set to
1699 * 1 then there is no option other than using handle 0x00.
1701 if (hdev
->le_num_of_adv_sets
== 1 && instance
== 1)
1704 adv
->handle
= instance
;
1706 list_add(&adv
->list
, &hdev
->adv_instances
);
1707 hdev
->adv_instance_cnt
++;
1711 adv
->min_interval
= min_interval
;
1712 adv
->max_interval
= max_interval
;
1713 adv
->tx_power
= tx_power
;
1714 /* Defining a mesh_handle changes the timing units to ms,
1715 * rather than seconds, and ties the instance to the requested
1718 adv
->mesh
= mesh_handle
;
1720 hci_set_adv_instance_data(hdev
, instance
, adv_data_len
, adv_data
,
1721 scan_rsp_len
, scan_rsp_data
);
1723 adv
->timeout
= timeout
;
1724 adv
->remaining_time
= timeout
;
1727 adv
->duration
= hdev
->def_multi_adv_rotation_duration
;
1729 adv
->duration
= duration
;
1731 INIT_DELAYED_WORK(&adv
->rpa_expired_cb
, adv_instance_rpa_expired
);
1733 BT_DBG("%s for %dMR", hdev
->name
, instance
);
1738 /* This function requires the caller holds hdev->lock */
1739 struct adv_info
*hci_add_per_instance(struct hci_dev
*hdev
, u8 instance
,
1740 u32 flags
, u8 data_len
, u8
*data
,
1741 u32 min_interval
, u32 max_interval
)
1743 struct adv_info
*adv
;
1745 adv
= hci_add_adv_instance(hdev
, instance
, flags
, 0, NULL
, 0, NULL
,
1746 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE
,
1747 min_interval
, max_interval
, 0);
1751 adv
->periodic
= true;
1752 adv
->per_adv_data_len
= data_len
;
1755 memcpy(adv
->per_adv_data
, data
, data_len
);
1760 /* This function requires the caller holds hdev->lock */
1761 int hci_set_adv_instance_data(struct hci_dev
*hdev
, u8 instance
,
1762 u16 adv_data_len
, u8
*adv_data
,
1763 u16 scan_rsp_len
, u8
*scan_rsp_data
)
1765 struct adv_info
*adv
;
1767 adv
= hci_find_adv_instance(hdev
, instance
);
1769 /* If advertisement doesn't exist, we can't modify its data */
1773 if (adv_data_len
&& ADV_DATA_CMP(adv
, adv_data
, adv_data_len
)) {
1774 memset(adv
->adv_data
, 0, sizeof(adv
->adv_data
));
1775 memcpy(adv
->adv_data
, adv_data
, adv_data_len
);
1776 adv
->adv_data_len
= adv_data_len
;
1777 adv
->adv_data_changed
= true;
1780 if (scan_rsp_len
&& SCAN_RSP_CMP(adv
, scan_rsp_data
, scan_rsp_len
)) {
1781 memset(adv
->scan_rsp_data
, 0, sizeof(adv
->scan_rsp_data
));
1782 memcpy(adv
->scan_rsp_data
, scan_rsp_data
, scan_rsp_len
);
1783 adv
->scan_rsp_len
= scan_rsp_len
;
1784 adv
->scan_rsp_changed
= true;
1787 /* Mark as changed if there are flags which would affect it */
1788 if (((adv
->flags
& MGMT_ADV_FLAG_APPEARANCE
) && hdev
->appearance
) ||
1789 adv
->flags
& MGMT_ADV_FLAG_LOCAL_NAME
)
1790 adv
->scan_rsp_changed
= true;
1795 /* This function requires the caller holds hdev->lock */
1796 u32
hci_adv_instance_flags(struct hci_dev
*hdev
, u8 instance
)
1799 struct adv_info
*adv
;
1801 if (instance
== 0x00) {
1802 /* Instance 0 always manages the "Tx Power" and "Flags"
1805 flags
= MGMT_ADV_FLAG_TX_POWER
| MGMT_ADV_FLAG_MANAGED_FLAGS
;
1807 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1808 * corresponds to the "connectable" instance flag.
1810 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
))
1811 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
1813 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
1814 flags
|= MGMT_ADV_FLAG_LIMITED_DISCOV
;
1815 else if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
1816 flags
|= MGMT_ADV_FLAG_DISCOV
;
1821 adv
= hci_find_adv_instance(hdev
, instance
);
1823 /* Return 0 when we got an invalid instance identifier. */
1830 bool hci_adv_instance_is_scannable(struct hci_dev
*hdev
, u8 instance
)
1832 struct adv_info
*adv
;
1834 /* Instance 0x00 always set local name */
1835 if (instance
== 0x00)
1838 adv
= hci_find_adv_instance(hdev
, instance
);
1842 if (adv
->flags
& MGMT_ADV_FLAG_APPEARANCE
||
1843 adv
->flags
& MGMT_ADV_FLAG_LOCAL_NAME
)
1846 return adv
->scan_rsp_len
? true : false;
1849 /* This function requires the caller holds hdev->lock */
1850 void hci_adv_monitors_clear(struct hci_dev
*hdev
)
1852 struct adv_monitor
*monitor
;
1855 idr_for_each_entry(&hdev
->adv_monitors_idr
, monitor
, handle
)
1856 hci_free_adv_monitor(hdev
, monitor
);
1858 idr_destroy(&hdev
->adv_monitors_idr
);
1861 /* Frees the monitor structure and does some bookkeeping.
1862 * This function requires the caller holds hdev->lock.
1864 void hci_free_adv_monitor(struct hci_dev
*hdev
, struct adv_monitor
*monitor
)
1866 struct adv_pattern
*pattern
;
1867 struct adv_pattern
*tmp
;
1872 list_for_each_entry_safe(pattern
, tmp
, &monitor
->patterns
, list
) {
1873 list_del(&pattern
->list
);
1877 if (monitor
->handle
)
1878 idr_remove(&hdev
->adv_monitors_idr
, monitor
->handle
);
1880 if (monitor
->state
!= ADV_MONITOR_STATE_NOT_REGISTERED
) {
1881 hdev
->adv_monitors_cnt
--;
1882 mgmt_adv_monitor_removed(hdev
, monitor
->handle
);
1888 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1889 * also attempts to forward the request to the controller.
1890 * This function requires the caller holds hci_req_sync_lock.
1892 int hci_add_adv_monitor(struct hci_dev
*hdev
, struct adv_monitor
*monitor
)
1894 int min
, max
, handle
;
1902 min
= HCI_MIN_ADV_MONITOR_HANDLE
;
1903 max
= HCI_MIN_ADV_MONITOR_HANDLE
+ HCI_MAX_ADV_MONITOR_NUM_HANDLES
;
1904 handle
= idr_alloc(&hdev
->adv_monitors_idr
, monitor
, min
, max
,
1907 hci_dev_unlock(hdev
);
1912 monitor
->handle
= handle
;
1914 if (!hdev_is_powered(hdev
))
1917 switch (hci_get_adv_monitor_offload_ext(hdev
)) {
1918 case HCI_ADV_MONITOR_EXT_NONE
:
1919 bt_dev_dbg(hdev
, "add monitor %d status %d",
1920 monitor
->handle
, status
);
1921 /* Message was not forwarded to controller - not an error */
1924 case HCI_ADV_MONITOR_EXT_MSFT
:
1925 status
= msft_add_monitor_pattern(hdev
, monitor
);
1926 bt_dev_dbg(hdev
, "add monitor %d msft status %d",
1934 /* Attempts to tell the controller and free the monitor. If somehow the
1935 * controller doesn't have a corresponding handle, remove anyway.
1936 * This function requires the caller holds hci_req_sync_lock.
1938 static int hci_remove_adv_monitor(struct hci_dev
*hdev
,
1939 struct adv_monitor
*monitor
)
1944 switch (hci_get_adv_monitor_offload_ext(hdev
)) {
1945 case HCI_ADV_MONITOR_EXT_NONE
: /* also goes here when powered off */
1946 bt_dev_dbg(hdev
, "remove monitor %d status %d",
1947 monitor
->handle
, status
);
1950 case HCI_ADV_MONITOR_EXT_MSFT
:
1951 handle
= monitor
->handle
;
1952 status
= msft_remove_monitor(hdev
, monitor
);
1953 bt_dev_dbg(hdev
, "remove monitor %d msft status %d",
1958 /* In case no matching handle registered, just free the monitor */
1959 if (status
== -ENOENT
)
1965 if (status
== -ENOENT
)
1966 bt_dev_warn(hdev
, "Removing monitor with no matching handle %d",
1968 hci_free_adv_monitor(hdev
, monitor
);
1973 /* This function requires the caller holds hci_req_sync_lock */
1974 int hci_remove_single_adv_monitor(struct hci_dev
*hdev
, u16 handle
)
1976 struct adv_monitor
*monitor
= idr_find(&hdev
->adv_monitors_idr
, handle
);
1981 return hci_remove_adv_monitor(hdev
, monitor
);
1984 /* This function requires the caller holds hci_req_sync_lock */
1985 int hci_remove_all_adv_monitor(struct hci_dev
*hdev
)
1987 struct adv_monitor
*monitor
;
1988 int idr_next_id
= 0;
1992 monitor
= idr_get_next(&hdev
->adv_monitors_idr
, &idr_next_id
);
1996 status
= hci_remove_adv_monitor(hdev
, monitor
);
2006 /* This function requires the caller holds hdev->lock */
2007 bool hci_is_adv_monitoring(struct hci_dev
*hdev
)
2009 return !idr_is_empty(&hdev
->adv_monitors_idr
);
2012 int hci_get_adv_monitor_offload_ext(struct hci_dev
*hdev
)
2014 if (msft_monitor_supported(hdev
))
2015 return HCI_ADV_MONITOR_EXT_MSFT
;
2017 return HCI_ADV_MONITOR_EXT_NONE
;
2020 struct bdaddr_list
*hci_bdaddr_list_lookup(struct list_head
*bdaddr_list
,
2021 bdaddr_t
*bdaddr
, u8 type
)
2023 struct bdaddr_list
*b
;
2025 list_for_each_entry(b
, bdaddr_list
, list
) {
2026 if (!bacmp(&b
->bdaddr
, bdaddr
) && b
->bdaddr_type
== type
)
2033 struct bdaddr_list_with_irk
*hci_bdaddr_list_lookup_with_irk(
2034 struct list_head
*bdaddr_list
, bdaddr_t
*bdaddr
,
2037 struct bdaddr_list_with_irk
*b
;
2039 list_for_each_entry(b
, bdaddr_list
, list
) {
2040 if (!bacmp(&b
->bdaddr
, bdaddr
) && b
->bdaddr_type
== type
)
2047 struct bdaddr_list_with_flags
*
2048 hci_bdaddr_list_lookup_with_flags(struct list_head
*bdaddr_list
,
2049 bdaddr_t
*bdaddr
, u8 type
)
2051 struct bdaddr_list_with_flags
*b
;
2053 list_for_each_entry(b
, bdaddr_list
, list
) {
2054 if (!bacmp(&b
->bdaddr
, bdaddr
) && b
->bdaddr_type
== type
)
2061 void hci_bdaddr_list_clear(struct list_head
*bdaddr_list
)
2063 struct bdaddr_list
*b
, *n
;
2065 list_for_each_entry_safe(b
, n
, bdaddr_list
, list
) {
2071 int hci_bdaddr_list_add(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
2073 struct bdaddr_list
*entry
;
2075 if (!bacmp(bdaddr
, BDADDR_ANY
))
2078 if (hci_bdaddr_list_lookup(list
, bdaddr
, type
))
2081 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
2085 bacpy(&entry
->bdaddr
, bdaddr
);
2086 entry
->bdaddr_type
= type
;
2088 list_add(&entry
->list
, list
);
2093 int hci_bdaddr_list_add_with_irk(struct list_head
*list
, bdaddr_t
*bdaddr
,
2094 u8 type
, u8
*peer_irk
, u8
*local_irk
)
2096 struct bdaddr_list_with_irk
*entry
;
2098 if (!bacmp(bdaddr
, BDADDR_ANY
))
2101 if (hci_bdaddr_list_lookup(list
, bdaddr
, type
))
2104 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
2108 bacpy(&entry
->bdaddr
, bdaddr
);
2109 entry
->bdaddr_type
= type
;
2112 memcpy(entry
->peer_irk
, peer_irk
, 16);
2115 memcpy(entry
->local_irk
, local_irk
, 16);
2117 list_add(&entry
->list
, list
);
2122 int hci_bdaddr_list_add_with_flags(struct list_head
*list
, bdaddr_t
*bdaddr
,
2125 struct bdaddr_list_with_flags
*entry
;
2127 if (!bacmp(bdaddr
, BDADDR_ANY
))
2130 if (hci_bdaddr_list_lookup(list
, bdaddr
, type
))
2133 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
2137 bacpy(&entry
->bdaddr
, bdaddr
);
2138 entry
->bdaddr_type
= type
;
2139 entry
->flags
= flags
;
2141 list_add(&entry
->list
, list
);
2146 int hci_bdaddr_list_del(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
2148 struct bdaddr_list
*entry
;
2150 if (!bacmp(bdaddr
, BDADDR_ANY
)) {
2151 hci_bdaddr_list_clear(list
);
2155 entry
= hci_bdaddr_list_lookup(list
, bdaddr
, type
);
2159 list_del(&entry
->list
);
2165 int hci_bdaddr_list_del_with_irk(struct list_head
*list
, bdaddr_t
*bdaddr
,
2168 struct bdaddr_list_with_irk
*entry
;
2170 if (!bacmp(bdaddr
, BDADDR_ANY
)) {
2171 hci_bdaddr_list_clear(list
);
2175 entry
= hci_bdaddr_list_lookup_with_irk(list
, bdaddr
, type
);
2179 list_del(&entry
->list
);
2185 int hci_bdaddr_list_del_with_flags(struct list_head
*list
, bdaddr_t
*bdaddr
,
2188 struct bdaddr_list_with_flags
*entry
;
2190 if (!bacmp(bdaddr
, BDADDR_ANY
)) {
2191 hci_bdaddr_list_clear(list
);
2195 entry
= hci_bdaddr_list_lookup_with_flags(list
, bdaddr
, type
);
2199 list_del(&entry
->list
);
2205 /* This function requires the caller holds hdev->lock */
2206 struct hci_conn_params
*hci_conn_params_lookup(struct hci_dev
*hdev
,
2207 bdaddr_t
*addr
, u8 addr_type
)
2209 struct hci_conn_params
*params
;
2211 list_for_each_entry(params
, &hdev
->le_conn_params
, list
) {
2212 if (bacmp(¶ms
->addr
, addr
) == 0 &&
2213 params
->addr_type
== addr_type
) {
2221 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2222 struct hci_conn_params
*hci_pend_le_action_lookup(struct list_head
*list
,
2223 bdaddr_t
*addr
, u8 addr_type
)
2225 struct hci_conn_params
*param
;
2229 list_for_each_entry_rcu(param
, list
, action
) {
2230 if (bacmp(¶m
->addr
, addr
) == 0 &&
2231 param
->addr_type
== addr_type
) {
2242 /* This function requires the caller holds hdev->lock */
2243 void hci_pend_le_list_del_init(struct hci_conn_params
*param
)
2245 if (list_empty(¶m
->action
))
2248 list_del_rcu(¶m
->action
);
2250 INIT_LIST_HEAD(¶m
->action
);
2253 /* This function requires the caller holds hdev->lock */
2254 void hci_pend_le_list_add(struct hci_conn_params
*param
,
2255 struct list_head
*list
)
2257 list_add_rcu(¶m
->action
, list
);
2260 /* This function requires the caller holds hdev->lock */
2261 struct hci_conn_params
*hci_conn_params_add(struct hci_dev
*hdev
,
2262 bdaddr_t
*addr
, u8 addr_type
)
2264 struct hci_conn_params
*params
;
2266 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
2270 params
= kzalloc(sizeof(*params
), GFP_KERNEL
);
2272 bt_dev_err(hdev
, "out of memory");
2276 bacpy(¶ms
->addr
, addr
);
2277 params
->addr_type
= addr_type
;
2279 list_add(¶ms
->list
, &hdev
->le_conn_params
);
2280 INIT_LIST_HEAD(¶ms
->action
);
2282 params
->conn_min_interval
= hdev
->le_conn_min_interval
;
2283 params
->conn_max_interval
= hdev
->le_conn_max_interval
;
2284 params
->conn_latency
= hdev
->le_conn_latency
;
2285 params
->supervision_timeout
= hdev
->le_supv_timeout
;
2286 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
2288 BT_DBG("addr %pMR (type %u)", addr
, addr_type
);
2293 void hci_conn_params_free(struct hci_conn_params
*params
)
2295 hci_pend_le_list_del_init(params
);
2298 hci_conn_drop(params
->conn
);
2299 hci_conn_put(params
->conn
);
2302 list_del(¶ms
->list
);
2306 /* This function requires the caller holds hdev->lock */
2307 void hci_conn_params_del(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 addr_type
)
2309 struct hci_conn_params
*params
;
2311 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
2315 hci_conn_params_free(params
);
2317 hci_update_passive_scan(hdev
);
2319 BT_DBG("addr %pMR (type %u)", addr
, addr_type
);
2322 /* This function requires the caller holds hdev->lock */
2323 void hci_conn_params_clear_disabled(struct hci_dev
*hdev
)
2325 struct hci_conn_params
*params
, *tmp
;
2327 list_for_each_entry_safe(params
, tmp
, &hdev
->le_conn_params
, list
) {
2328 if (params
->auto_connect
!= HCI_AUTO_CONN_DISABLED
)
2331 /* If trying to establish one time connection to disabled
2332 * device, leave the params, but mark them as just once.
2334 if (params
->explicit_connect
) {
2335 params
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
2339 hci_conn_params_free(params
);
2342 BT_DBG("All LE disabled connection parameters were removed");
2345 /* This function requires the caller holds hdev->lock */
2346 static void hci_conn_params_clear_all(struct hci_dev
*hdev
)
2348 struct hci_conn_params
*params
, *tmp
;
2350 list_for_each_entry_safe(params
, tmp
, &hdev
->le_conn_params
, list
)
2351 hci_conn_params_free(params
);
2353 BT_DBG("All LE connection parameters were removed");
2356 /* Copy the Identity Address of the controller.
2358 * If the controller has a public BD_ADDR, then by default use that one.
2359 * If this is a LE only controller without a public address, default to
2360 * the static random address.
2362 * For debugging purposes it is possible to force controllers with a
2363 * public address to use the static random address instead.
2365 * In case BR/EDR has been disabled on a dual-mode controller and
2366 * userspace has configured a static address, then that address
2367 * becomes the identity address instead of the public BR/EDR address.
2369 void hci_copy_identity_address(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2372 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
2373 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
2374 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
2375 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
2376 bacpy(bdaddr
, &hdev
->static_addr
);
2377 *bdaddr_type
= ADDR_LE_DEV_RANDOM
;
2379 bacpy(bdaddr
, &hdev
->bdaddr
);
2380 *bdaddr_type
= ADDR_LE_DEV_PUBLIC
;
2384 static void hci_clear_wake_reason(struct hci_dev
*hdev
)
2388 hdev
->wake_reason
= 0;
2389 bacpy(&hdev
->wake_addr
, BDADDR_ANY
);
2390 hdev
->wake_addr_type
= 0;
2392 hci_dev_unlock(hdev
);
2395 static int hci_suspend_notifier(struct notifier_block
*nb
, unsigned long action
,
2398 struct hci_dev
*hdev
=
2399 container_of(nb
, struct hci_dev
, suspend_notifier
);
2402 /* Userspace has full control of this device. Do nothing. */
2403 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
))
2406 /* To avoid a potential race with hci_unregister_dev. */
2410 case PM_HIBERNATION_PREPARE
:
2411 case PM_SUSPEND_PREPARE
:
2412 ret
= hci_suspend_dev(hdev
);
2414 case PM_POST_HIBERNATION
:
2415 case PM_POST_SUSPEND
:
2416 ret
= hci_resume_dev(hdev
);
2421 bt_dev_err(hdev
, "Suspend notifier action (%lu) failed: %d",
2428 /* Alloc HCI device */
2429 struct hci_dev
*hci_alloc_dev_priv(int sizeof_priv
)
2431 struct hci_dev
*hdev
;
2432 unsigned int alloc_size
;
2434 alloc_size
= sizeof(*hdev
);
2436 /* Fixme: May need ALIGN-ment? */
2437 alloc_size
+= sizeof_priv
;
2440 hdev
= kzalloc(alloc_size
, GFP_KERNEL
);
2444 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
2445 hdev
->esco_type
= (ESCO_HV1
);
2446 hdev
->link_mode
= (HCI_LM_ACCEPT
);
2447 hdev
->num_iac
= 0x01; /* One IAC support is mandatory */
2448 hdev
->io_capability
= 0x03; /* No Input No Output */
2449 hdev
->manufacturer
= 0xffff; /* Default to internal use */
2450 hdev
->inq_tx_power
= HCI_TX_POWER_INVALID
;
2451 hdev
->adv_tx_power
= HCI_TX_POWER_INVALID
;
2452 hdev
->adv_instance_cnt
= 0;
2453 hdev
->cur_adv_instance
= 0x00;
2454 hdev
->adv_instance_timeout
= 0;
2456 hdev
->advmon_allowlist_duration
= 300;
2457 hdev
->advmon_no_filter_duration
= 500;
2458 hdev
->enable_advmon_interleave_scan
= 0x00; /* Default to disable */
2460 hdev
->sniff_max_interval
= 800;
2461 hdev
->sniff_min_interval
= 80;
2463 hdev
->le_adv_channel_map
= 0x07;
2464 hdev
->le_adv_min_interval
= 0x0800;
2465 hdev
->le_adv_max_interval
= 0x0800;
2466 hdev
->le_scan_interval
= DISCOV_LE_SCAN_INT_FAST
;
2467 hdev
->le_scan_window
= DISCOV_LE_SCAN_WIN_FAST
;
2468 hdev
->le_scan_int_suspend
= DISCOV_LE_SCAN_INT_SLOW1
;
2469 hdev
->le_scan_window_suspend
= DISCOV_LE_SCAN_WIN_SLOW1
;
2470 hdev
->le_scan_int_discovery
= DISCOV_LE_SCAN_INT
;
2471 hdev
->le_scan_window_discovery
= DISCOV_LE_SCAN_WIN
;
2472 hdev
->le_scan_int_adv_monitor
= DISCOV_LE_SCAN_INT_FAST
;
2473 hdev
->le_scan_window_adv_monitor
= DISCOV_LE_SCAN_WIN_FAST
;
2474 hdev
->le_scan_int_connect
= DISCOV_LE_SCAN_INT_CONN
;
2475 hdev
->le_scan_window_connect
= DISCOV_LE_SCAN_WIN_CONN
;
2476 hdev
->le_conn_min_interval
= 0x0018;
2477 hdev
->le_conn_max_interval
= 0x0028;
2478 hdev
->le_conn_latency
= 0x0000;
2479 hdev
->le_supv_timeout
= 0x002a;
2480 hdev
->le_def_tx_len
= 0x001b;
2481 hdev
->le_def_tx_time
= 0x0148;
2482 hdev
->le_max_tx_len
= 0x001b;
2483 hdev
->le_max_tx_time
= 0x0148;
2484 hdev
->le_max_rx_len
= 0x001b;
2485 hdev
->le_max_rx_time
= 0x0148;
2486 hdev
->le_max_key_size
= SMP_MAX_ENC_KEY_SIZE
;
2487 hdev
->le_min_key_size
= SMP_MIN_ENC_KEY_SIZE
;
2488 hdev
->le_tx_def_phys
= HCI_LE_SET_PHY_1M
;
2489 hdev
->le_rx_def_phys
= HCI_LE_SET_PHY_1M
;
2490 hdev
->le_num_of_adv_sets
= HCI_MAX_ADV_INSTANCES
;
2491 hdev
->def_multi_adv_rotation_duration
= HCI_DEFAULT_ADV_DURATION
;
2492 hdev
->def_le_autoconnect_timeout
= HCI_LE_CONN_TIMEOUT
;
2493 hdev
->min_le_tx_power
= HCI_TX_POWER_INVALID
;
2494 hdev
->max_le_tx_power
= HCI_TX_POWER_INVALID
;
2496 hdev
->rpa_timeout
= HCI_DEFAULT_RPA_TIMEOUT
;
2497 hdev
->discov_interleaved_timeout
= DISCOV_INTERLEAVED_TIMEOUT
;
2498 hdev
->conn_info_min_age
= DEFAULT_CONN_INFO_MIN_AGE
;
2499 hdev
->conn_info_max_age
= DEFAULT_CONN_INFO_MAX_AGE
;
2500 hdev
->auth_payload_timeout
= DEFAULT_AUTH_PAYLOAD_TIMEOUT
;
2501 hdev
->min_enc_key_size
= HCI_MIN_ENC_KEY_SIZE
;
2503 /* default 1.28 sec page scan */
2504 hdev
->def_page_scan_type
= PAGE_SCAN_TYPE_STANDARD
;
2505 hdev
->def_page_scan_int
= 0x0800;
2506 hdev
->def_page_scan_window
= 0x0012;
2508 mutex_init(&hdev
->lock
);
2509 mutex_init(&hdev
->req_lock
);
2511 ida_init(&hdev
->unset_handle_ida
);
2513 INIT_LIST_HEAD(&hdev
->mesh_pending
);
2514 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
2515 INIT_LIST_HEAD(&hdev
->reject_list
);
2516 INIT_LIST_HEAD(&hdev
->accept_list
);
2517 INIT_LIST_HEAD(&hdev
->uuids
);
2518 INIT_LIST_HEAD(&hdev
->link_keys
);
2519 INIT_LIST_HEAD(&hdev
->long_term_keys
);
2520 INIT_LIST_HEAD(&hdev
->identity_resolving_keys
);
2521 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
2522 INIT_LIST_HEAD(&hdev
->le_accept_list
);
2523 INIT_LIST_HEAD(&hdev
->le_resolv_list
);
2524 INIT_LIST_HEAD(&hdev
->le_conn_params
);
2525 INIT_LIST_HEAD(&hdev
->pend_le_conns
);
2526 INIT_LIST_HEAD(&hdev
->pend_le_reports
);
2527 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
2528 INIT_LIST_HEAD(&hdev
->adv_instances
);
2529 INIT_LIST_HEAD(&hdev
->blocked_keys
);
2530 INIT_LIST_HEAD(&hdev
->monitored_devices
);
2532 INIT_LIST_HEAD(&hdev
->local_codecs
);
2533 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
2534 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
2535 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
2536 INIT_WORK(&hdev
->power_on
, hci_power_on
);
2537 INIT_WORK(&hdev
->error_reset
, hci_error_reset
);
2539 hci_cmd_sync_init(hdev
);
2541 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
2543 skb_queue_head_init(&hdev
->rx_q
);
2544 skb_queue_head_init(&hdev
->cmd_q
);
2545 skb_queue_head_init(&hdev
->raw_q
);
2547 init_waitqueue_head(&hdev
->req_wait_q
);
2549 INIT_DELAYED_WORK(&hdev
->cmd_timer
, hci_cmd_timeout
);
2550 INIT_DELAYED_WORK(&hdev
->ncmd_timer
, hci_ncmd_timeout
);
2552 hci_devcd_setup(hdev
);
2554 hci_init_sysfs(hdev
);
2555 discovery_init(hdev
);
2559 EXPORT_SYMBOL(hci_alloc_dev_priv
);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
	if (id < 0)
		return id;

	error = dev_set_name(&hdev->dev, "hci%u", id);
	if (error)
		return error;

	hdev->name = dev_name(&hdev->dev);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	/* Assume BR/EDR support until proven otherwise (such as
	 * through reading supported features during init.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * callback.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

	hci_sock_dev_event(hdev, HCI_DEV_REG);

	error = hci_register_suspend_notifier(hdev);
	if (error)
		BT_WARN("register suspend notifier failed error:%d\n", error);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_free(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	disable_work_sync(&hdev->rx_work);
	disable_work_sync(&hdev->cmd_work);
	disable_work_sync(&hdev->tx_work);
	disable_work_sync(&hdev->power_on);
	disable_work_sync(&hdev->error_reset);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending mgmt commands.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Release HCI device */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_codec_list_clear(&hdev->local_codecs);
	hci_dev_unlock(hdev);

	ida_destroy(&hdev->unset_handle_ida);
	ida_free(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->req_skb);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
int hci_register_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (!hdev->suspend_notifier.notifier_call &&
	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		ret = register_pm_notifier(&hdev->suspend_notifier);
	}

	return ret;
}

int hci_unregister_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (hdev->suspend_notifier.notifier_call) {
		ret = unregister_pm_notifier(&hdev->suspend_notifier);
		if (!ret)
			hdev->suspend_notifier.notifier_call = NULL;
	}

	return ret;
}
/* Cancel ongoing command synchronously:
 *
 * - Cancel command timer
 * - Reset command counter
 * - Cancel command request
 */
static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		disable_delayed_work_sync(&hdev->cmd_timer);
		disable_delayed_work_sync(&hdev->ncmd_timer);
	} else {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		cancel_delayed_work_sync(&hdev->ncmd_timer);
	}

	atomic_set(&hdev->cmd_cnt, 1);

	hci_cmd_sync_cancel_sync(hdev, err);
}
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	/* Cancel potentially blocking sync operation before suspend */
	hci_cancel_cmd_sync(hdev, EHOSTDOWN);

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	bt_dev_err(hdev, "Injecting HCI hardware error event");

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
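/* Illustrative use (sketch, not from the original source): a transport
 * driver that detects an unrecoverable firmware hang may call
 * hci_reset_dev() so the injected hardware error event is handled exactly
 * like one reported by the controller, which in turn schedules the
 * error_reset work.
 *
 *	if (firmware_hung)		// hypothetical driver-side check
 *		hci_reset_dev(hdev);
 */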
static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (hdev->classify_pkt_type)
		return hdev->classify_pkt_type(hdev, skb);

	return hci_skb_pkt_type(skb);
}

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 dev_pkt_type;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Check if the driver agree with packet type classification */
	dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
	if (hci_skb_pkt_type(skb) != dev_pkt_type)
		hci_skb_pkt_type(skb) = dev_pkt_type;

	switch (hci_skb_pkt_type(skb)) {
	case HCI_EVENT_PKT:
		break;
	case HCI_ACLDATA_PKT:
		/* Detect if ISO packet has been sent as ACL */
		if (hci_conn_num(hdev, ISO_LINK)) {
			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
			__u8 type;

			type = hci_conn_lookup_type(hdev, hci_handle(handle));
			if (type == ISO_LINK)
				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
		}
		break;
	case HCI_SCODATA_PKT:
		break;
	case HCI_ISODATA_PKT:
		break;
	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
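/* Illustrative driver RX path (sketch, not from the original source): a
 * transport driver tags a fully reassembled packet and hands it over with
 * hci_recv_frame(); "buf" and "count" below are placeholders for driver
 * data.
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_put_data(skb, buf, count);
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */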
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
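/* Illustrative use (sketch): vendor drivers typically call these once the
 * controller hardware and firmware have been identified; the format
 * strings and variables below are placeholders.
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u.%u", fw_major, fw_minor);
 */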
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
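/* The callbacks registered here are how the upper protocols (for example
 * L2CAP, SCO and ISO) are notified about connection-level events such as
 * connect/disconnect confirmations and security changes: each protocol
 * registers its struct hci_cb at module init and removes it on exit.
 */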
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
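/* Illustrative use (sketch, not from the original source): queueing a
 * single stand-alone command, here Write Scan Enable with both page and
 * inquiry scan enabled.
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 */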
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
/* Get data from the previously sent command */
static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
		return NULL;

	hdr = (void *)skb->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	return skb->data + HCI_COMMAND_HDR_SIZE;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	void *data;

	/* Check if opcode matches last sent command */
	data = hci_cmd_data(hdev->sent_cmd, opcode);
	if (!data)
		/* Check if opcode matches last request */
		data = hci_cmd_data(hdev->req_skb, opcode);

	return data;
}
/* Get data from last received event */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
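/* Note on fragmentation: hci_queue_acl() expects an skb whose frag_list
 * carries the continuation fragments. The head fragment keeps the caller's
 * ACL_START flag while every continuation is re-flagged as ACL_CONT before
 * it gets its own ACL header, so the controller sees one upper-layer PDU
 * split across several ACL packets.
 */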
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send ISO data */
static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
{
	struct hci_iso_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ISO_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}

void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hci_queue_iso(conn, &conn->data_q, skb);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
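/* Note on ISO framing: a single-fragment SDU is flagged ISO_SINGLE, while
 * fragmented SDUs use ISO_START for the head and ISO_CONT/ISO_END for the
 * continuations; hci_iso_flags_pack() folds the flag into the handle field
 * of each fragment's ISO header.
 */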
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
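/* Worked example (assuming the usual fair-share split of the free buffer
 * count across the busy connections, as reconstructed above): with
 * acl_cnt == 8 and three ACL connections holding queued data, the
 * connection picked by the scheduler gets a quote of 8 / 3 = 2 packets for
 * this round, and a connection is never handed less than 1.
 */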
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			/* hci_disconnect might sleep, so, we have to release
			 * the RCU read lock before calling it.
			 */
			rcu_read_unlock();
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
			rcu_read_lock();
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long last_tx;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	switch (type) {
	case LE_LINK:
		last_tx = hdev->le_last_tx;
		break;
	default:
		last_tx = hdev->acl_last_tx;
		break;
	}

	/* tx timeout must be longer than maximum link supervision timeout
	 */
	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, type);
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	hci_sched_acl_pkt(hdev);
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, *cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;

	__check_timeout(hdev, *cnt, LE_LINK);

	tmp = *cnt;
	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			(*cnt)--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (*cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
/* Schedule ISO */
static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ACL packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	}

	bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
		   handle);

drop:
	kfree_skb(skb);
}
3813 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3815 struct hci_sco_hdr
*hdr
;
3816 struct hci_conn
*conn
;
3817 __u16 handle
, flags
;
3819 hdr
= skb_pull_data(skb
, sizeof(*hdr
));
3821 bt_dev_err(hdev
, "SCO packet too small");
3825 handle
= __le16_to_cpu(hdr
->handle
);
3826 flags
= hci_flags(handle
);
3827 handle
= hci_handle(handle
);
3829 bt_dev_dbg(hdev
, "len %d handle 0x%4.4x flags 0x%4.4x", skb
->len
,
3832 hdev
->stat
.sco_rx
++;
3835 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
3836 hci_dev_unlock(hdev
);
3839 /* Send to upper protocol */
3840 hci_skb_pkt_status(skb
) = flags
& 0x03;
3841 sco_recv_scodata(conn
, skb
);
3844 bt_dev_err_ratelimited(hdev
, "SCO packet for unknown connection handle %d",
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	skb = hdev->req_skb;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->req_skb instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		return;
	}

	if (skb && bt_cb(skb)->hci.req_complete) {
		*req_complete = bt_cb(skb)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions used for collecting packet parsing
	 * coverage information from this background thread and associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return;
	}

	err = hci_send_frame(hdev, skb);
	if (err < 0) {
		hci_cmd_sync_cancel_sync(hdev, -err);
		return;
	}

	if (hdev->req_status == HCI_REQ_PEND &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}

	atomic_dec(&hdev->cmd_cnt);
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		hci_send_cmd_sync(hdev, skb);

		if (test_bit(HCI_RESET, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
			cancel_delayed_work(&hdev->cmd_timer);
		else
			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
					   HCI_CMD_TIMEOUT);
	}
}