/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
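/* Example (illustrative sketch, not part of the original file): a typical
 * caller builds a request on the stack, queues one or more commands and
 * runs it with an optional completion callback. The request APIs and the
 * opcode below are the real ones from this stack; the surrounding driver
 * context is hypothetical.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		BT_DBG("opcode 0x%4.4x status 0x%2.2x", opcode, status);
 *	}
 *
 *	static int example_send(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *		u8 scan = SCAN_PAGE;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return hci_req_run(&req, example_complete);
 *	}
 */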
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
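/* Worked example (illustrative, not part of the original file): for two
 * registered 16-bit UUIDs 0x110a and 0x110b, the helper above emits one EIR
 * field in [length][type][payload] form, little-endian per UUID:
 *
 *	0x05 0x03 0x0a 0x11 0x0b 0x11
 *	 |    |    payload: 0x110a, 0x110b
 *	 |    type: EIR_UUID16_ALL (complete list of 16-bit UUIDs)
 *	 length: type byte plus 2 bytes per UUID
 *
 * If the buffer runs out mid-list, the type byte is rewritten to
 * EIR_UUID16_SOME to signal an incomplete list.
 */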
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}
static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}
void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the moment */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
append_appearance(struct hci_dev
*hdev
, u8
*ptr
, u8 ad_len
)
1142 return eir_append_le16(ptr
, ad_len
, EIR_APPEARANCE
, hdev
->appearance
);
1145 static u8
create_default_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
1147 u8 scan_rsp_len
= 0;
1149 if (hdev
->appearance
) {
1150 scan_rsp_len
= append_appearance(hdev
, ptr
, scan_rsp_len
);
1153 return append_local_name(hdev
, ptr
, scan_rsp_len
);
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}
static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;
	/* In ext adv set param interval is 3 octets */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.handle = 0;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if random address need to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = 0;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}
void __hci_req_enable_ext_advertising(struct hci_request *req)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = 0;

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);
}
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req);

	return 0;
}
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}
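/* Worked example (illustrative, not part of the original file): an instance
 * added with timeout = 30 (total lifetime, seconds) and duration = 10
 * (per-round airtime) is scheduled for min(duration, remaining_time) = 10
 * seconds and its remaining_time drops to 20; after two more rounds the
 * remaining_time reaches 0 and the expire work removes the instance.
 */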
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
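/* Decision summary for the helper above (illustrative recap, not part of the
 * original file):
 *
 *	use_rpa		require_privacy		resulting own address
 *	true		any			RPA, regenerated when expired
 *	false		true			fresh non-resolvable private
 *						address
 *	false		false			static address when forced or
 *						needed, otherwise the public
 *						address
 */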
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}
void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}
static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}
static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}
void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}
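/* Example (illustrative sketch, not part of the original file): a caller that
 * wants to tear down a link passes the connection and an HCI reason code;
 * -ENODATA from the underlying request (no command queued for this
 * connection state) is already treated as success above.
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */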
static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}
static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
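/* Toggling scanning off and back on like this resets the controller's
 * duplicate filter. That matters on controllers with
 * HCI_QUIRK_STRICT_DUPLICATE_FILTER: without a periodic restart they
 * would keep suppressing advertising reports from devices already seen
 * earlier in the scan.
 */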
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
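	/* Example: if jiffies wrapped between scan_start and now (say
	 * scan_start == ULONG_MAX - 5 and now == 10), the else branch
	 * below computes elapsed as ULONG_MAX - scan_start + now == 15,
	 * one tick short of the true 16 elapsed ticks; at jiffies
	 * resolution this off-by-one is harmless.
	 */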
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		__hci_req_disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
			   own_addr_type, 0);

	return 0;
}
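/* The scan interval is passed in through opt so callers can tune it per
 * discovery type: start_discovery() below doubles DISCOV_LE_SCAN_INT for
 * simultaneous discovery, leaving the controller enough idle time to run
 * BR/EDR inquiry in parallel.
 */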
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
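/* Every discovery started above that involves LE scanning is bounded by
 * queueing hdev->le_scan_disable with the computed timeout;
 * le_scan_disable_work() then tears the scan down and, for interleaved
 * discovery, kicks off the BR/EDR inquiry phase.
 */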
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
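/* The boolean result reports whether any HCI command was actually queued
 * on the request, letting callers distinguish a no-op stop (nothing was
 * running) from one that still has controller traffic in flight.
 */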
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT,
			     &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}
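/* discov_update() is the single worker behind mgmt-initiated discovery
 * transitions: DISCOVERY_STARTING is driven to FINDING (or back to
 * STOPPED on error) and DISCOVERY_STOPPING to STOPPED, with the matching
 * mgmt completion event sent in each case.
 */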
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);

	return 0;
}
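/* powered_update_hci() re-applies the host-configurable settings (SSP,
 * Secure Connections, LE host support, advertising, link security, scan
 * mode, class, name and EIR) so that the controller state matches the
 * stack's flags every time the device is powered on.
 */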
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
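/* hci_request_setup() and hci_request_cancel_all() below are expected to
 * mirror each other: every work item initialized here must be canceled
 * there, so that teardown cannot race with a still-queued request.
 */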
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}