/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

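/* Illustrative sketch, not part of the original file: the typical request
 * lifecycle is to initialize a request on the stack, queue one or more
 * commands, and run it with an optional completion callback that fires
 * after the last command completes. The opcode and callback here are
 * examples only.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s example request done: status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* Returns -ENODATA if no command ended up being queued */
	return hci_req_run(&req, example_req_complete);
}
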
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

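/* Illustrative sketch, not part of the original file: __hci_cmd_sync()
 * sends a single command and returns the Command Complete parameters as
 * an skb, which the caller must free. Callers normally serialize with
 * hci_req_sync_lock() since hdev->req_status is shared state.
 */
static int __maybe_unused example_read_local_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data now holds struct hci_rp_read_local_version */
	kfree_skb(skb);
	return 0;
}
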
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

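/* Illustrative sketch, not part of the original file: hci_req_sync()
 * takes a builder callback that queues commands into the request, then
 * blocks until the last command completes. The builder below is an
 * example only; the opt argument is forwarded to it untouched.
 */
static int example_build_scan_enable(struct hci_request *req,
				     unsigned long opt)
{
	u8 scan = (u8)opt;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int __maybe_unused example_sync_scan_enable(struct hci_dev *hdev)
{
	u8 hci_status;

	return hci_req_sync(hdev, example_build_scan_enable, SCAN_PAGE,
			    HCI_CMD_TIMEOUT, &hci_status);
}
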
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

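/* Illustrative sketch, not part of the original file: hci_req_add_ev()
 * is for commands whose completion is signalled by a specific HCI event
 * rather than Command Complete. The opcode and event names below are
 * hypothetical placeholders, guarded so they never compile by accident.
 */
#ifdef EXAMPLE_VENDOR_OPCODE /* hypothetical, for illustration only */
static void __maybe_unused example_add_with_event(struct hci_request *req)
{
	/* Completion will be matched against EXAMPLE_VENDOR_EVENT */
	hci_req_add_ev(req, EXAMPLE_VENDOR_OPCODE, 0, NULL,
		       EXAMPLE_VENDOR_EVENT);
}
#endif
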
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

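/* Worked example, added for illustration: with two registered 16-bit
 * UUIDs 0x110a and 0x110b that both fit, create_uuid16_list() emits a
 * single EIR structure:
 *
 *	data[0] = 0x05			length (type byte + two UUIDs)
 *	data[1] = EIR_UUID16_ALL	(0x03)
 *	data[2..3] = 0x0a 0x11		UUID 0x110a, little endian
 *	data[4..5] = 0x0b 0x11		UUID 0x110b, little endian
 *
 * If space runs out mid-list, the type byte is rewritten to
 * EIR_UUID16_SOME (0x02) to signal an incomplete list.
 */
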
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}

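/* Worked example, added for illustration: with the white list in use
 * (update_white_list() returned 0x01) and both LE privacy and the
 * extended scanner filter policy feature enabled, the resulting filter
 * policy is 0x01 | 0x02 = 0x03, i.e. white list filtering with directed
 * advertising reported.
 */
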
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the moment */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

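/* Illustrative sketch, not part of the original file: enabling legacy
 * advertising queues the parameter and enable commands into a request
 * and runs it. In-tree callers build the request while holding
 * hdev->lock, which this sketch assumes as well.
 */
static int __maybe_unused example_enable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);
	__hci_req_enable_advertising(&req);
	hci_dev_unlock(hdev);

	return hci_req_run(&req, NULL);
}
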
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

append_appearance(struct hci_dev
*hdev
, u8
*ptr
, u8 ad_len
)
1162 return eir_append_le16(ptr
, ad_len
, EIR_APPEARANCE
, hdev
->appearance
);
1165 static u8
create_default_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
1167 u8 scan_rsp_len
= 0;
1169 if (hdev
->appearance
) {
1170 scan_rsp_len
= append_appearance(hdev
, ptr
, scan_rsp_len
);
1173 return append_local_name(hdev
, ptr
, scan_rsp_len
);
1176 static u8
create_instance_scan_rsp_data(struct hci_dev
*hdev
, u8 instance
,
1179 struct adv_info
*adv_instance
;
1181 u8 scan_rsp_len
= 0;
1183 adv_instance
= hci_find_adv_instance(hdev
, instance
);
1187 instance_flags
= adv_instance
->flags
;
1189 if ((instance_flags
& MGMT_ADV_FLAG_APPEARANCE
) && hdev
->appearance
) {
1190 scan_rsp_len
= append_appearance(hdev
, ptr
, scan_rsp_len
);
1193 memcpy(&ptr
[scan_rsp_len
], adv_instance
->scan_rsp_data
,
1194 adv_instance
->scan_rsp_len
);
1196 scan_rsp_len
+= adv_instance
->scan_rsp_len
;
1198 if (instance_flags
& MGMT_ADV_FLAG_LOCAL_NAME
)
1199 scan_rsp_len
= append_local_name(hdev
, ptr
, scan_rsp_len
);
1201 return scan_rsp_len
;
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If instance already has the flags set skip adding it once
	 * again
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;
	/* In ext adv set param interval is 3 octets */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if random address need to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = 0;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}

int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	struct adv_info *adv_instance;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv_instance && adv_instance->duration) {
		u16 duration = adv_instance->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		adv_set->duration = cpu_to_le16(duration / 10);
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);

	return 0;
}

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req, instance);

	return 0;
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

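/* Illustrative sketch, not part of the original file: removing all
 * advertising instances unconditionally, as a power-down cleanup would
 * do, uses instance 0x00 with force == true and no request or socket.
 */
static void __maybe_unused example_clear_all_instances(struct hci_dev *hdev)
{
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
}
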
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

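/* Worked example, added for illustration: the GIAC is the LAP 0x9e8b33
 * and the LIAC is 0x9e8b00; each LAP is written little endian, which is
 * why the byte sequences above are 0x33 0x8b 0x9e and 0x00 0x8b 0x9e.
 */
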
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}

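/* Illustrative sketch, not part of the original file: callers abort a
 * connection with a standard HCI reason code, e.g. remote user
 * terminated connection (0x13).
 */
static int __maybe_unused example_abort(struct hci_conn *conn)
{
	return hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
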
static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */
	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}
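/* Restarting an LE scan means briefly disabling and re-enabling it. On
 * controllers with HCI_QUIRK_STRICT_DUPLICATE_FILTER this clears the
 * controller's duplicate filter, so that devices seen earlier in a long
 * discovery session are reported again.
 */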
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
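/* After a successful restart, the le_scan_disable work (canceled for the
 * restart) must be re-queued with whatever is left of the original scan
 * duration; the elapsed-time computation below takes care to handle
 * jiffies wraparound.
 */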
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
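/* Prepare and start an active LE scan for discovery: any ongoing
 * advertising or background scanning is stopped first, and the scan runs
 * with a resolvable or non-resolvable private address, falling back to
 * the public address if updating the random address fails.
 */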
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		__hci_req_disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
			   own_addr_type);
	return 0;
}
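/* For controllers that can run BR/EDR inquiry and LE scanning at the same
 * time, both are queued into a single request here and the controller
 * schedules them itself.
 */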
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
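/* Queue the commands needed to stop the current discovery, returning true
 * if anything was actually added to the request.
 */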
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}
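/* Work item driving the discovery state machine: it starts or stops
 * discovery according to hdev->discovery.state and reports the result to
 * the management interface.
 */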
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
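/* Bring the controller configuration in line with the host's flags right
 * after power-on: SSP and Secure Connections modes, LE host support,
 * advertising state, and the BR/EDR scan, class, name and EIR settings.
 */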
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}
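/* hci_request_setup() wires up the work items used above;
 * hci_request_cancel_all() below is its counterpart during power-down and
 * also aborts any pending synchronous request with ENODEV.
 */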
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}